/**
   * Initializes store.
   *
   * @throws GridException If failed to initialize.
   */
  private void init() throws GridException {
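    // Only the first caller performs initialization; concurrent callers wait on initLatch below.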
    if (initGuard.compareAndSet(false, true)) {
      if (log.isDebugEnabled()) log.debug("Initializing cache store.");

      try {
        if (sesFactory != null)
          // Session factory has been provided - nothing to do.
          return;

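        // Configuration path has been provided - try URL, then file, then classpath resource.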
        if (!F.isEmpty(hibernateCfgPath)) {
          try {
            URL url = new URL(hibernateCfgPath);

            sesFactory = new Configuration().configure(url).buildSessionFactory();

            if (log.isDebugEnabled()) log.debug("Configured session factory using URL: " + url);

            // Session factory has been successfully initialized.
            return;
          } catch (MalformedURLException e) {
            if (log.isDebugEnabled())
              log.debug("Caught malformed URL exception: " + e.getMessage());
          }

          // Provided path is not a valid URL. File?
          File cfgFile = new File(hibernateCfgPath);

          if (cfgFile.exists()) {
            sesFactory = new Configuration().configure(cfgFile).buildSessionFactory();

            if (log.isDebugEnabled())
              log.debug("Configured session factory using file: " + hibernateCfgPath);

            // Session factory has been successfully initialized.
            return;
          }

          // Provided path is not a file. Classpath resource?
          sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory();

          if (log.isDebugEnabled())
            log.debug("Configured session factory using classpath resource: " + hibernateCfgPath);
        } else {
          if (hibernateProps == null) {
            U.warn(
                log, "No Hibernate configuration has been provided for store (will use default).");

            hibernateProps = new Properties();

            hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL);
            hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL);
            hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO);
          }

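          // Build session factory from the properties and the bundled mapping resource.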
          Configuration cfg = new Configuration();

          cfg.setProperties(hibernateProps);

          assert resourceAvailable(MAPPING_RESOURCE);

          cfg.addResource(MAPPING_RESOURCE);

          sesFactory = cfg.buildSessionFactory();

          if (log.isDebugEnabled())
            log.debug("Configured session factory using properties: " + hibernateProps);
        }
      } catch (HibernateException e) {
        throw new GridException("Failed to initialize store.", e);
      } finally {
        initLatch.countDown();
      }
    } else if (initLatch.getCount() > 0) U.await(initLatch);

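    // Initialization may have failed in another thread - fail if the factory is still missing.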
    if (sesFactory == null) throw new GridException("Cache store was not properly initialized.");
  }

  /**
   * Tests that the IPC cache map reuses a single IO instance across file systems and stops it
   * when the last one is closed.
   *
   * @throws Exception If failed.
   */
  @SuppressWarnings("unchecked")
  public void testIpcCache() throws Exception {
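    // Access the private static IPC IO cache via reflection.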
    Field cacheField = GridGgfsHadoopIpcIo.class.getDeclaredField("ipcCache");

    cacheField.setAccessible(true);

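    // Per-IO counter of file systems that are currently using the IO instance.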
    Field activeCntField = GridGgfsHadoopIpcIo.class.getDeclaredField("activeCnt");

    activeCntField.setAccessible(true);

    Map<String, GridGgfsHadoopIpcIo> cache =
        (Map<String, GridGgfsHadoopIpcIo>) cacheField.get(null);

    String name = "ggfs:" + getTestGridName(0) + "@";

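    // Hadoop configuration: disable file system caching and embedded endpoint mode for this GGFS
    // name, so each FileSystem.get() creates a new instance that goes through the IPC IO cache.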
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveGridGainUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.ggfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(GridGgfsHadoopUtils.PARAM_GGFS_ENDPOINT_NO_EMBED, name), true);

    // Create the first file system; it should create and cache a single IPC IO instance.
    FileSystem fs1 = FileSystem.get(new URI("ggfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    GridGgfsHadoopIpcIo io = null;

    System.out.println("CACHE: " + cache);

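    // Find the cached IO for the endpoint; its key is expected to contain the IPC port (10500).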
    for (String key : cache.keySet()) {
      if (key.contains("10500")) {
        io = cache.get(key);

        break;
      }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not
    // stopped.
    FileSystem fs2 = FileSystem.get(new URI("ggfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

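    // The 'stopping' flag indicates whether the IO has been shut down.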
    Field stopField = GridGgfsHadoopIpcIo.class.getDeclaredField("stopping");

    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody needs it anymore.
    fs1.close();

    assert cache.isEmpty();

    assert (Boolean) stopField.get(io);
  }