// scan has been done, create FmrcInv private FmrcInv makeFmrcInv(Formatter debug) throws IOException { try { Map<CalendarDate, FmrInv> fmrMap = new HashMap<CalendarDate, FmrInv>(); // all files are grouped by run date in an FmrInv List<FmrInv> fmrList = new ArrayList<FmrInv>(); // an fmrc is a collection of fmr // get the inventory, sorted by path for (MFile f : manager.getFiles()) { if (logger.isDebugEnabled()) logger.debug("Fmrc: " + config.spec + ": file=" + f.getPath()); GridDatasetInv inv = null; try { inv = GridDatasetInv.open( manager, f, config.innerNcml); // inventory is discovered for each GDS } catch (IOException ioe) { logger.warn("Error opening " + f.getPath() + "(skipped)", ioe); continue; // skip } CalendarDate runDate = inv.getRunDate(); if (debug != null) debug.format(" opened %s rundate = %s%n", f.getPath(), inv.getRunDateString()); // add to fmr for that rundate FmrInv fmr = fmrMap.get(runDate); if (fmr == null) { fmr = new FmrInv(runDate); fmrMap.put(runDate, fmr); fmrList.add(fmr); } fmr.addDataset(inv, debug); } if (debug != null) debug.format("%n"); // finish the FmrInv Collections.sort(fmrList); for (FmrInv fmr : fmrList) { fmr.finish(); if (logger.isDebugEnabled()) logger.debug( "Fmrc: spec=" + config.spec + ": fmr rundate=" + fmr.getRunDate() + " nfiles= " + fmr.getFiles().size()); } return new FmrcInv( "fmrc:" + manager.getCollectionName(), fmrList, config.fmrcConfig.regularize); } catch (Throwable t) { logger.error("makeFmrcInv", t); throw new RuntimeException(t); } }
// any time the server needs access to the dataset, it gets a "GuardedDataset" which allows us to // add caching // optionally, a session may be established, which allows us to reserve the dataset for that // session. protected GuardedDataset getDataset(ReqState preq) throws Exception { HttpServletRequest req = preq.getRequest(); String reqPath = preq.getDataSet(); // see if the client wants sessions boolean acceptSession = false; String s = req.getHeader("X-Accept-Session"); if (s != null && s.equalsIgnoreCase("true") && allowSessions) acceptSession = true; HttpSession session = null; if (acceptSession) { // see if theres already a session established, create one if not session = req.getSession(); if (!session.isNew()) { GuardedDataset gdataset = (GuardedDataset) session.getAttribute(reqPath); if (null != gdataset) { if (debugSession) System.out.printf(" found gdataset %s in session %s %n", reqPath, session.getId()); if (log.isDebugEnabled()) log.debug(" found gdataset " + gdataset + " in session " + session.getId()); return gdataset; } } } NetcdfFile ncd = DatasetHandler.getNetcdfFile(req, preq.getResponse(), reqPath); if (null == ncd) return null; GuardedDataset gdataset = new GuardedDatasetCacheAndClone(reqPath, ncd, acceptSession); // GuardedDataset gdataset = new GuardedDatasetImpl(reqPath, ncd, acceptSession); if (acceptSession) { String cookiePath = req.getRequestURI(); String suffix = "." + preq.getRequestSuffix(); if (cookiePath.endsWith(suffix)) // snip off the suffix cookiePath = cookiePath.substring(0, cookiePath.length() - suffix.length()); session.setAttribute(reqPath, gdataset); session.setAttribute(CookieFilter.SESSION_PATH, cookiePath); // session.setAttribute("dataset", ncd.getLocation()); // for UsageValve // session.setMaxInactiveInterval(30); // 30 second timeout !! 
if (debugSession) System.out.printf( " added gdataset %s in session %s cookiePath %s %n", reqPath, session.getId(), cookiePath); if (log.isDebugEnabled()) log.debug(" added gdataset " + gdataset + " in session " + session.getId()); } /* else { session = req.getSession(); session.setAttribute("dataset", ncd.getLocation()); // for UsageValve } */ return gdataset; }
/**
 * Read instance content from given InputStream, which must not be closed.
 *
 * <p>Deserializes a Map of language-code to HTML text and stores it as the
 * instance's HTMLText content.
 *
 * @param instance panel instance the imported text is attached to
 * @param is stream positioned at a serialized Map; left open for the caller
 * @throws Exception on deserialization or persistence failure
 */
public void importContent(PanelInstance instance, InputStream is) throws Exception {
  HTMLText currentText = new HTMLText();
  currentText.setPanelInstance(instance);
  // NOTE(review): Java native deserialization — safe only if the import source is
  // trusted; consider an ObjectInputFilter if imports can come from untrusted users.
  ObjectInputStream ois = new ObjectInputStream(is);
  Map text = (Map) ois.readObject();
  if (log.isDebugEnabled()) log.debug("Importing content: " + text);
  // iterate entries directly instead of keySet() + get() double lookup
  for (Iterator it = text.entrySet().iterator(); it.hasNext(); ) {
    Map.Entry entry = (Map.Entry) it.next();
    currentText.setText((String) entry.getKey(), (String) entry.getValue());
  }
  currentText.save();
}
/** Write instance content to given OutputStream, which must not be closed. */ public void exportContent(PanelInstance instance, OutputStream os) throws Exception { HTMLText text = load(instance); if (text == null) { try { text = new HTMLText(); text.setPanelInstance(instance); text.save(); } catch (Exception e) { log.error("Error creating empty HTMLText object", e); } } ObjectOutputStream oos = new ObjectOutputStream(os); if (log.isDebugEnabled()) log.debug("Exporting content: " + text.getText()); HashMap h = new HashMap(); // Avoids serializing a hibernate map h.putAll(text.getText()); oos.writeObject(h); }
public boolean readIndex(String filename, long gribLastModified, CollectionManager.Force force) throws IOException { File idxFile = GribCollection.getIndexFile(filename + GBX9_IDX); if (!idxFile.exists()) return false; long idxModified = idxFile.lastModified(); if ((force != CollectionManager.Force.nocheck) && (idxModified < gribLastModified)) return false; // force new index if file was updated FileInputStream fin = new FileInputStream(idxFile); // LOOK need DiskCache for non-writeable directories try { //// check header is ok if (!NcStream.readAndTest(fin, MAGIC_START.getBytes())) { log.info("Bad magic number of grib index, on file" + idxFile); return false; } int v = NcStream.readVInt(fin); if (v != version) { if ((v == 0) || (v > version)) throw new IOException( "Grib1Index found version " + v + ", want version " + version + " on " + filename); if (log.isDebugEnabled()) log.debug( "Grib1Index found version " + v + ", want version " + version + " on " + filename); return false; } int size = NcStream.readVInt(fin); if (size <= 0 || size > 100 * 1000 * 1000) { // try to catch garbage log.warn("Grib1Index bad size = {} for {} ", size, filename); return false; } byte[] m = new byte[size]; NcStream.readFully(fin, m); Grib1IndexProto.Grib1Index proto = Grib1IndexProto.Grib1Index.parseFrom(m); String fname = proto.getFilename(); if (debug) System.out.printf("%s for %s%n", fname, filename); gdsList = new ArrayList<Grib1SectionGridDefinition>(proto.getGdsListCount()); for (Grib1IndexProto.Grib1GdsSection pgds : proto.getGdsListList()) { Grib1SectionGridDefinition gds = readGds(pgds); gdsList.add(gds); } if (debug) System.out.printf(" read %d gds%n", gdsList.size()); records = new ArrayList<Grib1Record>(proto.getRecordsCount()); for (Grib1IndexProto.Grib1Record precord : proto.getRecordsList()) { records.add(readRecord(precord)); } if (debug) System.out.printf(" read %d records%n", records.size()); } catch (java.lang.NegativeArraySizeException e) { log.error("GribIndex 
failed on " + filename, e); return false; } catch (IOException e) { log.error("GribIndex failed on " + filename, e); return false; } finally { fin.close(); } return true; }
/**
 * Write the requested coverage subset to a temp file in the disk cache and return it.
 * Supported encodings: GeoTIFF / GeoTIFF_Float (single range field only) and NetCDF3.
 *
 * @param format response encoding
 * @param bboxLatLonRect horizontal bounding box
 * @param vertSubset vertical subset, may be null (no vertical subsetting)
 * @param rangeSubset names of the requested range fields
 * @param timeRange time subset, may be null (no time subsetting)
 * @return the temp file holding the encoded coverage
 * @throws WcsException on invalid parameters, subsetting failure, or write failure
 */
public File writeCoverageDataToFile(
    WcsRequest.Format format,
    LatLonRect bboxLatLonRect,
    AxisSubset vertSubset,
    List<String> rangeSubset,
    CalendarDateRange timeRange)
    throws WcsException {
  // track how far subsetting got, so the InvalidRangeException handler below can
  // report which axis (vertical, time, or horizontal) caused the failure
  boolean zRangeDone = false;
  boolean tRangeDone = false;
  try {
    // Get the height range.
    Range zRange = vertSubset != null ? vertSubset.getRange() : null;
    zRangeDone = true;

    // Get the time range by locating the start/end indexes on the 1D time axis.
    Range tRange = null;
    if (timeRange != null) {
      CoordinateAxis1DTime timeAxis = this.coordSys.getTimeAxis1D();
      int startIndex = timeAxis.findTimeIndexFromCalendarDate(timeRange.getStart());
      int endIndex = timeAxis.findTimeIndexFromCalendarDate(timeRange.getEnd());
      tRange = new Range(startIndex, endIndex);
      tRangeDone = true;
    }

    if (format == WcsRequest.Format.GeoTIFF || format == WcsRequest.Format.GeoTIFF_Float) {
      // GeoTIFF can carry only one band here, so exactly one range field is required
      if (rangeSubset.size() != 1) {
        String msg =
            "GeoTIFF response encoding only available for single range field selection ["
                + rangeSubset
                + "].";
        log.error("writeCoverageDataToFile(): " + msg);
        throw new WcsException(WcsException.Code.InvalidParameterValue, "RangeSubset", msg);
      }
      String reqRangeFieldName = rangeSubset.get(0);

      File dir = new File(getDiskCache().getRootDirectory());
      File tifFile = File.createTempFile("WCS", ".tif", dir);
      if (log.isDebugEnabled())
        log.debug("writeCoverageDataToFile(): tifFile=" + tifFile.getPath());

      // subset the grid (stride 1 on all axes), read the first time/vert slice
      WcsRangeField rangeField = this.range.get(reqRangeFieldName);
      GridDatatype subset =
          rangeField.getGridDatatype().makeSubset(tRange, zRange, bboxLatLonRect, 1, 1, 1);
      Array data = subset.readDataSlice(0, 0, -1, -1);

      // last arg selects greyscale (GeoTIFF) vs floating-point (GeoTIFF_Float) output
      GeotiffWriter writer = new GeotiffWriter(tifFile.getPath());
      writer.writeGrid(
          this.dataset.getDataset(), subset, data, format == WcsRequest.Format.GeoTIFF);
      writer.close();
      return tifFile;
    } else if (format == WcsRequest.Format.NetCDF3) {
      File dir = new File(getDiskCache().getRootDirectory());
      File outFile = File.createTempFile("WCS", ".nc", dir);
      if (log.isDebugEnabled())
        log.debug("writeCoverageDataToFile(): ncFile=" + outFile.getPath());
      // NOTE(review): result discarded; possibly kept as a deliberate fail-fast NPE
      // probe on the vertical axis — confirm before removing (original comment: "WTF ??")
      this.coordSys.getVerticalAxis().isNumeric();
      NetcdfFileWriter writer =
          NetcdfFileWriter.createNew(NetcdfFileWriter.Version.netcdf3, outFile.getAbsolutePath());
      CFGridWriter2.writeFile(
          this.dataset.getDataset(),
          rangeSubset,
          bboxLatLonRect,
          null,
          1,
          zRange,
          timeRange,
          1,
          true,
          writer);
      return outFile;
    } else {
      log.error(
          "writeCoverageDataToFile(): Unsupported response encoding format [" + format + "].");
      throw new WcsException(
          WcsException.Code.InvalidFormat,
          "Format",
          "Unsupported response encoding format [" + format + "].");
    }
  } catch (InvalidRangeException e) {
    // report the axis that failed, based on how far subsetting progressed
    String msg = "Failed to subset coverage [" + this.getName();
    if (!zRangeDone) msg += "] along vertical axis [" + vertSubset + "]. ";
    else if (!tRangeDone) msg += "] along time axis [" + timeRange + "]. ";
    else msg += "] in horizontal plane [" + bboxLatLonRect + "]. ";
    log.error("writeCoverageDataToFile(): " + msg + e.getMessage());
    throw new WcsException(WcsException.Code.CoverageNotDefined, "", msg);
  } catch (IOException e) {
    log.error(
        "writeCoverageDataToFile(): Failed to write file for requested coverage <"
            + this.getName()
            + ">: "
            + e.getMessage());
    throw new WcsException(
        WcsException.Code.UNKNOWN, "", "Problem creating coverage [" + this.getName() + "].");
  }
}
/** * Write equivilent uncompressed version of the file. * * @param inputRaf file to uncompress * @param ufilename write to this file * @return raf of uncompressed file * @throws IOException on read error */ private RandomAccessFile uncompress(RandomAccessFile inputRaf, String ufilename) throws IOException { RandomAccessFile outputRaf = new RandomAccessFile(ufilename, "rw"); FileLock lock = null; while (true) { // loop waiting for the lock try { lock = outputRaf.getRandomAccessFile().getChannel().lock(0, 1, false); break; } catch (OverlappingFileLockException oe) { // not sure why lock() doesnt block try { Thread.sleep(100); // msecs } catch (InterruptedException e1) { } } } try { inputRaf.seek(0); byte[] header = new byte[Level2Record.FILE_HEADER_SIZE]; inputRaf.read(header); outputRaf.write(header); boolean eof = false; int numCompBytes; byte[] ubuff = new byte[40000]; byte[] obuff = new byte[40000]; try { CBZip2InputStream cbzip2 = new CBZip2InputStream(); while (!eof) { try { numCompBytes = inputRaf.readInt(); if (numCompBytes == -1) { if (log.isDebugEnabled()) log.debug(" done: numCompBytes=-1 "); break; } } catch (EOFException ee) { log.warn(" got EOFException "); break; // assume this is ok } if (log.isDebugEnabled()) { log.debug( "reading compressed bytes " + numCompBytes + " input starts at " + inputRaf.getFilePointer() + "; output starts at " + outputRaf.getFilePointer()); } /* * For some stupid reason, the last block seems to * have the number of bytes negated. So, we just * assume that any negative number (other than -1) * is the last block and go on our merry little way. */ if (numCompBytes < 0) { if (log.isDebugEnabled()) log.debug("last block?" 
+ numCompBytes); numCompBytes = -numCompBytes; eof = true; } byte[] buf = new byte[numCompBytes]; inputRaf.readFully(buf); ByteArrayInputStream bis = new ByteArrayInputStream(buf, 2, numCompBytes - 2); // CBZip2InputStream cbzip2 = new CBZip2InputStream(bis); cbzip2.setStream(bis); int total = 0; int nread; /* while ((nread = cbzip2.read(ubuff)) != -1) { dout2.write(ubuff, 0, nread); total += nread; } */ try { while ((nread = cbzip2.read(ubuff)) != -1) { if (total + nread > obuff.length) { byte[] temp = obuff; obuff = new byte[temp.length * 2]; System.arraycopy(temp, 0, obuff, 0, temp.length); } System.arraycopy(ubuff, 0, obuff, total, nread); total += nread; } if (obuff.length >= 0) outputRaf.write(obuff, 0, total); } catch (BZip2ReadException ioe) { log.warn("Nexrad2IOSP.uncompress ", ioe); } float nrecords = (float) (total / 2432.0); if (log.isDebugEnabled()) log.debug( " unpacked " + total + " num bytes " + nrecords + " records; ouput ends at " + outputRaf.getFilePointer()); } } catch (Exception e) { if (outputRaf != null) outputRaf.close(); outputRaf = null; // dont leave bad files around File ufile = new File(ufilename); if (ufile.exists()) { if (!ufile.delete()) log.warn("failed to delete uncompressed file (IOException)" + ufilename); } if (e instanceof IOException) throw (IOException) e; else throw new RuntimeException(e); } } finally { if (null != outputRaf) outputRaf.flush(); if (lock != null) lock.release(); } return outputRaf; }
/**
 * Open a NEXRAD Level II volume scan: parse the volume header, transparently switch to
 * an uncompressed copy of the file when the data is bzip2-compressed, then read every
 * Level2Record and sort the radials into scan groups by moment type.
 *
 * @param orgRaf the raw volume file
 * @param cancelTask may be null; checked each record so a long read can be aborted
 * @throws IOException on read error
 */
Level2VolumeScan(RandomAccessFile orgRaf, CancelTask cancelTask) throws IOException {
  this.raf = orgRaf;

  if (log.isDebugEnabled()) log.debug("Level2VolumeScan on " + raf.getLocation());

  raf.seek(0);
  raf.order(RandomAccessFile.BIG_ENDIAN);

  // volume scan header
  dataFormat = raf.readString(8);
  raf.skipBytes(1);
  String volumeNo = raf.readString(3);
  title_julianDay = raf.readInt(); // since 1/1/70
  title_msecs = raf.readInt();
  stationId = raf.readString(4).trim(); // only in AR2V0001
  if (log.isDebugEnabled()) log.debug(" dataFormat= " + dataFormat + " stationId= " + stationId);

  if (stationId.length() == 0) {
    // try to get it from the filename LOOK
    stationId = null;
  }

  // try to find the station; 4-char ids missing the leading "K" get it prepended
  if (stationId != null) {
    if (!stationId.startsWith("K") && stationId.length() == 4) {
      String _stationId = "K" + stationId;
      station = NexradStationDB.get(_stationId);
    } else station = NexradStationDB.get(stationId);
  }

  // see if we have to uncompress
  if (dataFormat.equals(AR2V0001)
      || dataFormat.equals(AR2V0003)
      || dataFormat.equals(AR2V0004)
      || dataFormat.equals(AR2V0006)) {
    raf.skipBytes(4);
    String BZ = raf.readString(2);
    if (BZ.equals("BZ")) {
      RandomAccessFile uraf;
      File uncompressedFile = DiskCache.getFileStandardPolicy(raf.getLocation() + ".uncompress");

      if (uncompressedFile.exists() && uncompressedFile.length() > 0) {
        // a cached uncompressed copy exists; if another thread is still writing it,
        // a shared lock on its first byte blocks until the writer finishes
        FileInputStream fstream = null;
        FileLock lock = null;
        try {
          fstream = new FileInputStream(uncompressedFile);
          // lock = fstream.getChannel().lock(0, 1, true); // wait till its unlocked

          while (true) { // loop waiting for the lock
            try {
              lock = fstream.getChannel().lock(0, 1, true); // wait till its unlocked
              break;

            } catch (OverlappingFileLockException oe) { // not sure why lock() doesnt block
              try {
                Thread.sleep(100); // msecs
              } catch (InterruptedException e1) {
                break;
              }
            }
          }

        } finally {
          if (lock != null) lock.release();
          if (fstream != null) fstream.close();
        }
        uraf = new ucar.unidata.io.RandomAccessFile(uncompressedFile.getPath(), "r");

      } else {
        // nope, gotta uncompress it
        uraf = uncompress(raf, uncompressedFile.getPath());
        if (log.isDebugEnabled()) log.debug("made uncompressed file= " + uncompressedFile.getPath());
      }

      // switch to uncompressed file
      raf.close();
      raf = uraf;
      raf.order(RandomAccessFile.BIG_ENDIAN);
    }

    raf.seek(Level2Record.FILE_HEADER_SIZE);
  }

  // accumulators for each moment type; legacy (message type 1) vs high-res (type 31)
  List<Level2Record> reflectivity = new ArrayList<Level2Record>();
  List<Level2Record> doppler = new ArrayList<Level2Record>();

  List<Level2Record> highReflectivity = new ArrayList<Level2Record>();
  List<Level2Record> highVelocity = new ArrayList<Level2Record>();
  List<Level2Record> highSpectrum = new ArrayList<Level2Record>();
  List<Level2Record> highDiffReflectivity = new ArrayList<Level2Record>();
  List<Level2Record> highDiffPhase = new ArrayList<Level2Record>();
  List<Level2Record> highCorreCoefficient = new ArrayList<Level2Record>();

  long message_offset31 = 0;
  int recno = 0;
  while (true) {

    Level2Record r = Level2Record.factory(raf, recno++, message_offset31);
    if (r == null) break;
    if (showData) r.dump2(System.out);
    // skip non-data messages
    if (r.message_type == 31) {
      // type-31 messages are variable length; track the cumulative offset adjustment
      // relative to the fixed 2432-byte legacy record size
      message_offset31 = message_offset31 + (r.message_size * 2 + 12 - 2432);
    }

    if (r.message_type != 1 && r.message_type != 31) {
      if (showMessages) r.dumpMessage(System.out);
      continue;
    }
    // if (showData) r.dump2(System.out);

    /* skip bad
    if (!r.checkOk()) {
      r.dump(System.out);
      continue;
    } */

    // some global params: vcp from first record that has one; first/last record refs
    if (vcp == 0) vcp = r.vcp;
    if (first == null) first = r;
    last = r;

    if (runCheck && !r.checkOk()) {
      continue;
    }

    if (r.hasReflectData) reflectivity.add(r);
    if (r.hasDopplerData) doppler.add(r);

    if (r.message_type == 31) {
      if (r.hasHighResREFData) highReflectivity.add(r);
      if (r.hasHighResVELData) highVelocity.add(r);
      if (r.hasHighResSWData) highSpectrum.add(r);
      if (r.hasHighResZDRData) highDiffReflectivity.add(r);
      if (r.hasHighResPHIData) highDiffPhase.add(r);
      if (r.hasHighResRHOData) highCorreCoefficient.add(r);
    }

    if ((cancelTask != null) && cancelTask.isCancel()) return;
  }
  if (debugRadials)
    System.out.println(" reflect ok= " + reflectivity.size() + " doppler ok= " + doppler.size());

  // group radials into scans: 600 radials/scan for legacy data, 720 for high-res
  if (highReflectivity.size() == 0) {
    reflectivityGroups = sortScans("reflect", reflectivity, 600);
    dopplerGroups = sortScans("doppler", doppler, 600);
  }

  if (highReflectivity.size() > 0)
    reflectivityHighResGroups = sortScans("reflect_HR", highReflectivity, 720);
  if (highVelocity.size() > 0)
    velocityHighResGroups = sortScans("velocity_HR", highVelocity, 720);
  if (highSpectrum.size() > 0)
    spectrumHighResGroups = sortScans("spectrum_HR", highSpectrum, 720);
  if (highDiffReflectivity.size() > 0)
    diffReflectHighResGroups = sortScans("diffReflect_HR", highDiffReflectivity, 720);
  if (highDiffPhase.size() > 0)
    diffPhaseHighResGroups = sortScans("diffPhase_HR", highDiffPhase, 720);
  if (highCorreCoefficient.size() > 0)
    coefficientHighResGroups = sortScans("coefficient_HR", highCorreCoefficient, 720);
}