public static void main(String[] args) throws Exception {
    DiskCache.setCachePolicy(true);

    File f = new File(args[0]);
    Pair<String, String> nameParts = FileNames.splitExtension(f);
    String ext = nameParts.b;
    if (ext.equals("grb") || ext.equals("bz2") || ext.equals("gz")) {
        Map<String, Metadata> metaMap = ConvertNetCDF.readFile(f.getAbsolutePath());

        /* Don't cache more than 1 GB: */
        DiskCache.cleanCache(1073741824, null);

        /*String[] attribs = { "temperature_surface",
                "total_cloud_cover_entire_atmosphere",
                "visibility_surface", "pressure_surface",
                "categorical_snow_yes1_no0_surface",
                "categorical_rain_yes1_no0_surface",
                "relative_humidity_zerodegc_isotherm" };*/
        String[] attribs = {"U-component_of_wind"};

        Metadata m = metaMap.get("9x");
        System.out.print("9x@" + m.getTemporalProperties().getStart() + "\t");
        for (String attrib : attribs) {
            System.out.print(m.getAttribute(attrib).getString() + "\t");
        }
        System.out.println();
    }
}
public static void main(String[] args) throws Exception {
    DiskCache.setCachePolicy(true);

    File dir = new File(args[0]);
    File[] files = dir.listFiles();
    if (files == null) {
        throw new IOException("Not a readable directory: " + dir);
    }
    for (File f : files) {
        Pair<String, String> nameParts = FileNames.splitExtension(f);
        String ext = nameParts.b;
        if (ext.equals("grb") || ext.equals("bz2") || ext.equals("gz")) {
            Map<String, Metadata> metaMap = ConvertNetCDF.readFile(f.getAbsolutePath());

            /* Don't cache more than 1 GB: */
            DiskCache.cleanCache(1073741824, null);

            /* Now that we have geographically-partitioned files, let's pick
             * some attributes to store as indexable metadata. */

            /* Write converted files to disk */
            System.out.print("Writing converted files");
            int processed = 0;
            /* Print roughly 50 progress dots; guard against a
             * divide-by-zero when there are fewer than 50 grids. */
            int increment = Math.max(1, metaMap.keySet().size() / 50);
            for (String g : metaMap.keySet()) {
                Metadata meta = metaMap.get(g);

                /* Create the directory for this file */
                String storageDir = getStorageDir(args[1], meta);
                File destDir = new File(storageDir);
                if (!destDir.exists()) {
                    if (!destDir.mkdirs()) {
                        throw new IOException("Failed to create directory " + destDir);
                    }
                }

                /* Create a file Block to store all the metadata in, and
                 * generate a subset for indexing purposes. */
                Block block = createBlock(nameParts.a, meta);

                /* Write out the file */
                String outputName = nameParts.a + ".gblock";
                FileOutputStream fOut = new FileOutputStream(storageDir + "/" + outputName);
                fOut.write(Serializer.serialize(block));
                fOut.close();

                if (++processed % increment == 0) {
                    System.out.print('.');
                }
            }
            System.out.println();
        }
    }
}
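/*
 * The loop above calls getStorageDir(), which is not shown in this
 * listing. The sketch below is one plausible implementation, not the
 * actual helper: it assumes getTemporalProperties().getStart() returns
 * epoch milliseconds (as the tab-separated print in the first main()
 * suggests), and the one-directory-per-day layout is purely illustrative.
 */
private static String getStorageDir(String baseDir, Metadata meta) {
    /* Hypothetical layout: one subdirectory per day, e.g. base/2013/11/25 */
    java.text.SimpleDateFormat fmt = new java.text.SimpleDateFormat("yyyy/MM/dd");
    String datePath = fmt.format(
            new java.util.Date(meta.getTemporalProperties().getStart()));
    return baseDir + "/" + datePath;
}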
public static void main(String[] args) throws IOException {
    DiskCache.setRootDirectory("C:/temp/chill/");
    make("C:/junk.txt");
    make("C:/some/enchanted/evening/joots+3478.txt");
    // deliberately awkward name (embedded space, quote, URL scheme) to exercise cache-name mapping
    make("http://www.unidata.ucar.edu/some/enc hanted/eve'ning/nowrite.gibberish");
    showCache(System.out);
    StringBuilder sbuff = new StringBuilder();
    cleanCache(1000 * 1000 * 10, sbuff);
    System.out.println(sbuff);
}
/**
 * debug
 *
 * @param filename look for this file
 * @throws java.io.IOException if read error
 */
static void make(String filename) throws IOException {
    File want = DiskCache.getCacheFile(filename);
    System.out.println("make=" + want.getPath() + "; exists = " + want.exists());
    if (!want.exists()) {
        want.createNewFile();
    }
    System.out.println("  canRead= " + want.canRead()
        + " canWrite = " + want.canWrite()
        + " lastMod = " + new Date(want.lastModified()));
    System.out.println("  original=" + filename);
}
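/*
 * make() above uses DiskCache.getCacheFile(), which always maps a name
 * into the cache root. The Level2 reader further down uses
 * getFileStandardPolicy() instead. The sketch below contrasts the two;
 * it is illustrative only, and the paths are made up.
 */
public static void demoPolicies() throws IOException {
    DiskCache.setRootDirectory("/tmp/cache/");

    /* getCacheFile() always resolves into the cache directory. */
    File cached = DiskCache.getCacheFile("/data/readonly/sample.grb");

    /* getFileStandardPolicy() returns the original location if it exists
     * or is writable, and falls back to the cache directory otherwise
     * (or whenever setCachePolicy(true) forces cache use). */
    File standard = DiskCache.getFileStandardPolicy("/data/readonly/sample.grb.uncompress");

    System.out.println("cached   = " + cached.getPath());
    System.out.println("standard = " + standard.getPath());
}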
void init(TdsContext tdsContext) {
    // new for 4.2 - feature collection caching
    String fcCache = ThreddsConfig.get(
        "FeatureCollection.cacheDirectory",
        tdsContext.getContentDirectory().getPath() + "/collectionCache/");
    try {
        thredds.inventory.bdb.MetadataManager.setCacheDirectory(fcCache);
        startupLog.info("CdmInit: FeatureCollection.cacheDirectory= " + fcCache);
    } catch (Exception e) {
        startupLog.error("CdmInit: Failed to open FeatureCollection.cacheDirectory= " + fcCache, e);
    }

    // new for 4.1 - ehcache object caching
    String ehConfig = ThreddsConfig.get("ehcache.configFile", tdsContext.getWebinfPath() + "/ehcache.xml");
    String ehDirectory = ThreddsConfig.get(
        "ehcache.directory", tdsContext.getContentDirectory().getPath() + "/ehcache/");
    try {
        cacheManager = thredds.filesystem.ControllerCaching.makeStandardController(ehConfig, ehDirectory);
        thredds.inventory.DatasetCollectionManager.setController(cacheManager);
        startupLog.info("CdmInit: ehcache.config= " + ehConfig + " directory= " + ehDirectory);
    } catch (IOException ioe) {
        startupLog.error("CdmInit: Can't read ehcache config file " + ehConfig, ioe);
    }

    boolean useBytesForDataSize = ThreddsConfig.getBoolean("catalogWriting.useBytesForDataSize", false);
    InvCatalogFactory10.useBytesForDataSize(useBytesForDataSize);

    ////////////////////////////////////
    // AggregationFmrc.setDefinitionDirectory(new File(tdsContext.getRootDirectory(),
    //     fmrcDefinitionDirectory));
    FmrcInventoryServlet.setDefinitionDirectory(
        new File(tdsContext.getRootDirectory(), fmrcDefinitionDirectory));

    // NetcdfFileCache: default allows 200 - 400 open files, cleanup every 10 minutes
    int min = ThreddsConfig.getInt("NetcdfFileCache.minFiles", 200);
    int max = ThreddsConfig.getInt("NetcdfFileCache.maxFiles", 400);
    int secs = ThreddsConfig.getSeconds("NetcdfFileCache.scour", 10 * 60);
    if (max > 0) {
        NetcdfDataset.initNetcdfFileCache(min, max, secs);
    }

    // HTTP file access: allow 25 - 40 open datasets, cleanup every 10 minutes
    min = ThreddsConfig.getInt("HTTPFileCache.minFiles", 25);
    max = ThreddsConfig.getInt("HTTPFileCache.maxFiles", 40);
    secs = ThreddsConfig.getSeconds("HTTPFileCache.scour", 10 * 60);
    if (max > 0) {
        ServletUtil.setFileCache(new FileCacheRaf(min, max, secs));
    }

    // for backwards compatibility - should be replaced by directly specifying the IndexExtendMode.
    // turn off Grib extend indexing; indexes are automatically rebuilt externally every 10 minutes
    boolean extendIndex = ThreddsConfig.getBoolean("GribIndexing.setExtendIndex", false);
    GridServiceProvider.IndexExtendMode mode = extendIndex
        ? GridServiceProvider.IndexExtendMode.extendwrite
        : GridServiceProvider.IndexExtendMode.readonly;
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexFileModeOnOpen(mode);
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexFileModeOnSync(mode);

    boolean alwaysUseCache = ThreddsConfig.getBoolean("GribIndexing.alwaysUseCache", false);
    ucar.nc2.iosp.grid.GridServiceProvider.setIndexAlwaysInCache(alwaysUseCache);

    // optimization: netcdf-3 files can only grow, not have metadata changes
    ucar.nc2.NetcdfFile.setProperty("syncExtendOnly", "true");

    // persist joinNew aggregations: scour every 24 hours by default,
    // delete anything older than 90 days
    String dir = ThreddsConfig.get(
        "AggregationCache.dir",
        new File(tdsContext.getContentDirectory().getPath(), "cacheAged").getPath());
    int scourSecs = ThreddsConfig.getSeconds("AggregationCache.scour", 24 * 60 * 60);
    int maxAgeSecs = ThreddsConfig.getSeconds("AggregationCache.maxAge", 90 * 24 * 60 * 60);
    aggCache = new DiskCache2(dir, false, maxAgeSecs / 60, scourSecs / 60);
    Aggregation.setPersistenceCache(aggCache);

    // how to choose the typical dataset?
    String typicalDataset = ThreddsConfig.get("Aggregation.typicalDataset", "penultimate");
    Aggregation.setTypicalDatasetMode(typicalDataset);

    // Nj22 disk cache
    dir = ThreddsConfig.get(
        "DiskCache.dir", new File(tdsContext.getContentDirectory(), "cache").getPath());
    boolean alwaysUse = ThreddsConfig.getBoolean("DiskCache.alwaysUse", false);
    scourSecs = ThreddsConfig.getSeconds("DiskCache.scour", 60 * 60);
    long maxSize = ThreddsConfig.getBytes("DiskCache.maxSize", (long) 1000 * 1000 * 1000);
    DiskCache.setRootDirectory(dir);
    DiskCache.setCachePolicy(alwaysUse);

    Calendar c = Calendar.getInstance(); // contains current startup time
    c.add(Calendar.SECOND, scourSecs / 2); // first run in half the scour time
    timer = new Timer();
    timer.scheduleAtFixedRate(new CacheScourTask(maxSize), c.getTime(), (long) 1000 * scourSecs);

    startupLog.info("CdmInit complete");
}
public void run() {
    StringBuilder sbuff = new StringBuilder();
    DiskCache.cleanCache(maxBytes, sbuff); // prune oldest files until the cache is under maxBytes
    sbuff.append("----------------------\n");
    // cacheLog.info(sbuff.toString());
}
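/*
 * run() above belongs to the task that init() schedules via
 * timer.scheduleAtFixedRate(new CacheScourTask(maxSize), ...). The
 * skeleton below is inferred from that call site and is a sketch, not
 * necessarily identical to the real class.
 */
private static class CacheScourTask extends java.util.TimerTask {
    private final long maxBytes;

    CacheScourTask(long maxBytes) {
        this.maxBytes = maxBytes;
    }

    @Override
    public void run() {
        /* body as shown above: clean the cache down to maxBytes */
        StringBuilder sbuff = new StringBuilder();
        DiskCache.cleanCache(maxBytes, sbuff);
        sbuff.append("----------------------\n");
        // cacheLog.info(sbuff.toString());
    }
}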
Level2VolumeScan(RandomAccessFile orgRaf, CancelTask cancelTask) throws IOException {
    this.raf = orgRaf;

    if (log.isDebugEnabled())
        log.debug("Level2VolumeScan on " + raf.getLocation());

    raf.seek(0);
    raf.order(RandomAccessFile.BIG_ENDIAN);

    // volume scan header
    dataFormat = raf.readString(8);
    raf.skipBytes(1);
    String volumeNo = raf.readString(3);
    title_julianDay = raf.readInt(); // since 1/1/70
    title_msecs = raf.readInt();
    stationId = raf.readString(4).trim(); // only in AR2V0001
    if (log.isDebugEnabled())
        log.debug(" dataFormat= " + dataFormat + " stationId= " + stationId);

    if (stationId.length() == 0) {
        // try to get it from the filename LOOK
        stationId = null;
    }

    // try to find the station
    if (stationId != null) {
        if (!stationId.startsWith("K") && stationId.length() == 4) {
            String _stationId = "K" + stationId;
            station = NexradStationDB.get(_stationId);
        } else
            station = NexradStationDB.get(stationId);
    }

    // see if we have to uncompress
    if (dataFormat.equals(AR2V0001) || dataFormat.equals(AR2V0003)
        || dataFormat.equals(AR2V0004) || dataFormat.equals(AR2V0006)) {
        raf.skipBytes(4);
        String BZ = raf.readString(2);
        if (BZ.equals("BZ")) {
            RandomAccessFile uraf;
            File uncompressedFile = DiskCache.getFileStandardPolicy(raf.getLocation() + ".uncompress");

            if (uncompressedFile.exists() && uncompressedFile.length() > 0) {
                // see if it's locked - another thread is writing it
                FileInputStream fstream = null;
                FileLock lock = null;
                try {
                    fstream = new FileInputStream(uncompressedFile);
                    // lock = fstream.getChannel().lock(0, 1, true); // wait till it's unlocked

                    while (true) { // loop waiting for the lock
                        try {
                            lock = fstream.getChannel().lock(0, 1, true); // wait till it's unlocked
                            break;
                        } catch (OverlappingFileLockException oe) { // not sure why lock() doesn't block
                            try {
                                Thread.sleep(100); // msecs
                            } catch (InterruptedException e1) {
                                break;
                            }
                        }
                    }
                } finally {
                    if (lock != null) lock.release();
                    if (fstream != null) fstream.close();
                }
                uraf = new ucar.unidata.io.RandomAccessFile(uncompressedFile.getPath(), "r");
            } else {
                // nope, gotta uncompress it
                uraf = uncompress(raf, uncompressedFile.getPath());
                if (log.isDebugEnabled())
                    log.debug("made uncompressed file= " + uncompressedFile.getPath());
            }

            // switch to uncompressed file
            raf.close();
            raf = uraf;
            raf.order(RandomAccessFile.BIG_ENDIAN);
        }

        raf.seek(Level2Record.FILE_HEADER_SIZE);
    }

    List<Level2Record> reflectivity = new ArrayList<Level2Record>();
    List<Level2Record> doppler = new ArrayList<Level2Record>();
    List<Level2Record> highReflectivity = new ArrayList<Level2Record>();
    List<Level2Record> highVelocity = new ArrayList<Level2Record>();
    List<Level2Record> highSpectrum = new ArrayList<Level2Record>();
    List<Level2Record> highDiffReflectivity = new ArrayList<Level2Record>();
    List<Level2Record> highDiffPhase = new ArrayList<Level2Record>();
    List<Level2Record> highCorreCoefficient = new ArrayList<Level2Record>();

    long message_offset31 = 0;
    int recno = 0;
    while (true) {
        Level2Record r = Level2Record.factory(raf, recno++, message_offset31);
        if (r == null) break;
        if (showData) r.dump2(System.out);

        // skip non-data messages
        if (r.message_type == 31) {
            message_offset31 = message_offset31 + (r.message_size * 2 + 12 - 2432);
        }

        if (r.message_type != 1 && r.message_type != 31) {
            if (showMessages) r.dumpMessage(System.out);
            continue;
        }

        /* skip bad
        if (!r.checkOk()) {
            r.dump(System.out);
            continue;
        } */

        // some global params
        if (vcp == 0) vcp = r.vcp;
        if (first == null) first = r;
        last = r;

        if (runCheck && !r.checkOk()) {
            continue;
        }

        if (r.hasReflectData) reflectivity.add(r);
        if (r.hasDopplerData) doppler.add(r);

        if (r.message_type == 31) {
            if (r.hasHighResREFData) highReflectivity.add(r);
            if (r.hasHighResVELData) highVelocity.add(r);
            if (r.hasHighResSWData) highSpectrum.add(r);
            if (r.hasHighResZDRData) highDiffReflectivity.add(r);
            if (r.hasHighResPHIData) highDiffPhase.add(r);
            if (r.hasHighResRHOData) highCorreCoefficient.add(r);
        }

        if ((cancelTask != null) && cancelTask.isCancel()) return;
    }
    if (debugRadials)
        System.out.println(" reflect ok= " + reflectivity.size() + " doppler ok= " + doppler.size());

    if (highReflectivity.size() == 0) {
        reflectivityGroups = sortScans("reflect", reflectivity, 600);
        dopplerGroups = sortScans("doppler", doppler, 600);
    }

    if (highReflectivity.size() > 0)
        reflectivityHighResGroups = sortScans("reflect_HR", highReflectivity, 720);
    if (highVelocity.size() > 0)
        velocityHighResGroups = sortScans("velocity_HR", highVelocity, 720);
    if (highSpectrum.size() > 0)
        spectrumHighResGroups = sortScans("spectrum_HR", highSpectrum, 720);
    if (highDiffReflectivity.size() > 0)
        diffReflectHighResGroups = sortScans("diffReflect_HR", highDiffReflectivity, 720);
    if (highDiffPhase.size() > 0)
        diffPhaseHighResGroups = sortScans("diffPhase_HR", highDiffPhase, 720);
    if (highCorreCoefficient.size() > 0)
        coefficientHighResGroups = sortScans("coefficient_HR", highCorreCoefficient, 720);
}
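/*
 * Illustrative usage of the constructor above (hypothetical file path).
 * The constructor has default access, so this assumes a caller in the
 * same package; passing a null CancelTask skips cancellation checks.
 */
ucar.unidata.io.RandomAccessFile raf =
        new ucar.unidata.io.RandomAccessFile("/data/nexrad/KTLX20130520_200123_V06", "r");
Level2VolumeScan scan = new Level2VolumeScan(raf, null);
/* Note: if the volume was compressed, the constructor closes the original
 * file and switches to the uncompressed copy, so treat the scan object,
 * not the local raf, as the owner of the open file from here on. */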