Code example #1
0
File: DumpNetCDF.java  Project: jkachika/galileo
  /**
   * Dumps selected indexable attributes for the "9x" geohash partition of a
   * single GRIB file. Usage: {@code DumpNetCDF <file.grb|.bz2|.gz>}.
   *
   * <p>Fixes over the previous version: validates {@code args}, and guards the
   * {@code metaMap.get("9x")} lookup, which previously threw a
   * NullPointerException when the file contained no "9x" partition.
   */
  public static void main(String[] args) throws Exception {
    // Cache converted files on disk so repeated runs avoid re-conversion.
    DiskCache.setCachePolicy(true);

    if (args.length < 1) {
      System.err.println("Usage: DumpNetCDF <file>");
      return;
    }

    File f = new File(args[0]);
    Pair<String, String> nameParts = FileNames.splitExtension(f);
    String ext = nameParts.b;
    // Only GRIB files (possibly bzip2/gzip compressed) are supported.
    if (ext.equals("grb") || ext.equals("bz2") || ext.equals("gz")) {
      Map<String, Metadata> metaMap = ConvertNetCDF.readFile(f.getAbsolutePath());

      /* Don't cache more than 1 GB: */
      DiskCache.cleanCache(1073741824, null);

      String[] attribs = {"U-component_of_wind"};
      Metadata m = metaMap.get("9x");
      if (m == null) {
        System.err.println("No metadata for geohash '9x' in " + f);
        return;
      }
      System.out.print("9x@" + m.getTemporalProperties().getStart() + "\t");
      for (String attrib : attribs) {
        System.out.print(m.getAttribute(attrib).getString() + "\t");
      }
      System.out.println();
    }
  }
Code example #2
0
File: ConvertNetCDF.java  Project: jkachika/galileo
  /**
   * Converts every GRIB file in a directory into geographically-partitioned
   * {@code .gblock} files. Usage: {@code ConvertNetCDF <inputDir> <outputDir>}.
   *
   * <p>Fixes over the previous version: {@code increment} could be zero when a
   * file produced fewer than 50 partitions, crashing with
   * {@code ArithmeticException} at the progress-dot modulo; {@code listFiles()}
   * can return {@code null} for an unreadable path (previously an NPE); the
   * output stream leaked if serialization or the write threw.
   */
  public static void main(String[] args) throws Exception {
    DiskCache.setCachePolicy(true);

    if (args.length < 2) {
      System.err.println("Usage: ConvertNetCDF <inputDir> <outputDir>");
      return;
    }

    File dir = new File(args[0]);
    // listFiles() returns null when the path is not a directory or is unreadable.
    File[] inputFiles = dir.listFiles();
    if (inputFiles == null) {
      throw new IOException("Cannot list directory: " + dir);
    }

    for (File f : inputFiles) {
      Pair<String, String> nameParts = FileNames.splitExtension(f);
      String ext = nameParts.b;
      // Only GRIB files (possibly bzip2/gzip compressed) are converted.
      if (ext.equals("grb") || ext.equals("bz2") || ext.equals("gz")) {
        Map<String, Metadata> metaMap = ConvertNetCDF.readFile(f.getAbsolutePath());

        /* Don't cache more than 1 GB: */
        DiskCache.cleanCache(1073741824, null);

        /* Now that we have geographically-partitioned files, let's pick
         * some attributes to store as indexable metadata. */

        /* Write converted files to disk */
        System.out.print("Writing converted files");
        int processed = 0;
        // Aim for ~50 progress dots per file; clamp to 1 so the modulo below
        // never divides by zero when there are fewer than 50 partitions.
        int increment = Math.max(1, metaMap.size() / 50);
        for (Map.Entry<String, Metadata> entry : metaMap.entrySet()) {
          Metadata meta = entry.getValue();

          /* Create the directory for this file */
          String storageDir = getStorageDir(args[1], meta);
          File destDir = new File(storageDir);
          if (!destDir.exists()) {
            if (!destDir.mkdirs()) {
              throw new IOException("Failed to create directory " + destDir);
            }
          }

          /* Create a file Block to store all the metadata in, and
           * generate a subset for indexing purposes. */
          Block block = createBlock(nameParts.a, meta);

          /* Write out the file; try-with-resources closes the stream even
           * when serialize() or write() throws. */
          String outputName = nameParts.a + ".gblock";
          try (FileOutputStream fOut = new FileOutputStream(storageDir + "/" + outputName)) {
            fOut.write(Serializer.serialize(block));
          }

          if (++processed % increment == 0) {
            System.out.print('.');
          }
        }
        System.out.println();
      }
    }
  }
Code example #3
0
  /**
   * One-time CDM/NetCDF-Java initialization for the TDS server context: sets
   * up the feature-collection metadata cache, the ehcache-backed filesystem
   * controller, file caches (NetCDF and HTTP), GRIB index handling, aggregation
   * persistence, the Nj22 disk cache, and finally a periodic cache-scour timer.
   *
   * <p>All tunables are read from {@code ThreddsConfig} with the defaults shown
   * inline at each call site. NOTE(review): statement order appears intentional
   * (cache directories are configured before components that may open files) —
   * confirm before reordering anything here.
   */
  void init(TdsContext tdsContext) {
    // new for 4.2 - feature collection caching
    String fcCache =
        ThreddsConfig.get(
            "FeatureCollection.cacheDirectory",
            tdsContext.getContentDirectory().getPath() + "/collectionCache/");
    try {
      thredds.inventory.bdb.MetadataManager.setCacheDirectory(fcCache);
      startupLog.info("CdmInit: FeatureCollection.cacheDirectory= " + fcCache);
    } catch (Exception e) {
      // Non-fatal: log and continue so the rest of initialization still runs.
      startupLog.error("CdmInit: Failed to open FeatureCollection.cacheDirectory= " + fcCache, e);
    }

    // new for 4.1 - ehcache object caching
    String ehConfig =
        ThreddsConfig.get("ehcache.configFile", tdsContext.getWebinfPath() + "/ehcache.xml");
    String ehDirectory =
        ThreddsConfig.get(
            "ehcache.directory", tdsContext.getContentDirectory().getPath() + "/ehcache/");
    try {
      // cacheManager is retained as a field (presumably for later shutdown —
      // TODO confirm it is closed elsewhere in this class).
      cacheManager =
          thredds.filesystem.ControllerCaching.makeStandardController(ehConfig, ehDirectory);
      thredds.inventory.DatasetCollectionManager.setController(cacheManager);
      startupLog.info("CdmInit: ehcache.config= " + ehConfig + " directory= " + ehDirectory);

    } catch (IOException ioe) {
      // Non-fatal: dataset collection management falls back to its default controller.
      startupLog.error("CdmInit: Cant read ehcache config file " + ehConfig, ioe);
    }

    boolean useBytesForDataSize =
        ThreddsConfig.getBoolean("catalogWriting.useBytesForDataSize", false);
    InvCatalogFactory10.useBytesForDataSize(useBytesForDataSize);

    ////////////////////////////////////
    // AggregationFmrc.setDefinitionDirectory(new File(tdsContext.getRootDirectory(),
    // fmrcDefinitionDirectory));
    FmrcInventoryServlet.setDefinitionDirectory(
        new File(tdsContext.getRootDirectory(), fmrcDefinitionDirectory));

    // NetcdfFileCache : default is allow 200 - 400 open files, cleanup every 10 minutes
    int min = ThreddsConfig.getInt("NetcdfFileCache.minFiles", 200);
    int max = ThreddsConfig.getInt("NetcdfFileCache.maxFiles", 400);
    int secs = ThreddsConfig.getSeconds("NetcdfFileCache.scour", 10 * 60);
    // max <= 0 disables the cache entirely.
    if (max > 0) {
      NetcdfDataset.initNetcdfFileCache(min, max, secs);
    }

    // HTTP file access : // allow 20 - 40 open datasets, cleanup every 10 minutes
    min = ThreddsConfig.getInt("HTTPFileCache.minFiles", 25);
    max = ThreddsConfig.getInt("HTTPFileCache.maxFiles", 40);
    secs = ThreddsConfig.getSeconds("HTTPFileCache.scour", 10 * 60);
    if (max > 0) {
      ServletUtil.setFileCache(new FileCacheRaf(min, max, secs));
    }

    // for backwards compatibility - should be replaced by direct specifying of the IndexExtendMode
    // turn off Grib extend indexing; indexes are automatically done every 10 minutes externally
    boolean extendIndex = ThreddsConfig.getBoolean("GribIndexing.setExtendIndex", false);
    GridServiceProvider.IndexExtendMode mode =
        extendIndex
            ? GridServiceProvider.IndexExtendMode.extendwrite
            : GridServiceProvider.IndexExtendMode.readonly;
    // Same mode is applied for both open and sync paths.
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexFileModeOnOpen(mode);
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexFileModeOnSync(mode);

    boolean alwaysUseCache = ThreddsConfig.getBoolean("GribIndexing.alwaysUseCache", false);
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexAlwaysInCache(alwaysUseCache);

    // optimization: netcdf-3 files can only grow, not have metadata changes
    ucar.nc2.NetcdfFile.setProperty("syncExtendOnly", "true");

    // persist joinNew aggregations. default every 24 hours, delete stuff older than 90 days
    String dir =
        ThreddsConfig.get(
            "AggregationCache.dir",
            new File(tdsContext.getContentDirectory().getPath(), "cacheAged").getPath());
    int scourSecs = ThreddsConfig.getSeconds("AggregationCache.scour", 24 * 60 * 60);
    int maxAgeSecs = ThreddsConfig.getSeconds("AggregationCache.maxAge", 90 * 24 * 60 * 60);
    // DiskCache2 takes its periods in minutes, hence the /60 conversions.
    aggCache = new DiskCache2(dir, false, maxAgeSecs / 60, scourSecs / 60);
    Aggregation.setPersistenceCache(aggCache);

    // how to choose the typical dataset ?
    String typicalDataset = ThreddsConfig.get("Aggregation.typicalDataset", "penultimate");
    Aggregation.setTypicalDatasetMode(typicalDataset);

    // Nj22 disk cache
    dir =
        ThreddsConfig.get(
            "DiskCache.dir", new File(tdsContext.getContentDirectory(), "cache").getPath());
    boolean alwaysUse = ThreddsConfig.getBoolean("DiskCache.alwaysUse", false);
    scourSecs = ThreddsConfig.getSeconds("DiskCache.scour", 60 * 60);
    long maxSize = ThreddsConfig.getBytes("DiskCache.maxSize", (long) 1000 * 1000 * 1000);
    DiskCache.setRootDirectory(dir);
    DiskCache.setCachePolicy(alwaysUse);

    // Schedule the periodic cache scour. The (long) cast on the period keeps
    // 1000 * scourSecs from overflowing int arithmetic for large periods.
    Calendar c = Calendar.getInstance(); // contains current startup time
    c.add(Calendar.SECOND, scourSecs / 2); // starting in half the scour time
    timer = new Timer();
    timer.scheduleAtFixedRate(new CacheScourTask(maxSize), c.getTime(), (long) 1000 * scourSecs);

    startupLog.info("CdmInit complete");
  }