public Object __tojava__(Class cls) throws IOException {
  if (OutputStream.class.isAssignableFrom(cls) && writing)
    return new FileOutputStream(file.getFD());
  else if (InputStream.class.isAssignableFrom(cls) && reading)
    return new FileInputStream(file.getFD());
  return super.__tojava__(cls);
}
/**
 * Get an image from the disk cache.
 *
 * @param key The unique identifier for the bitmap
 * @return The bitmap or null if not found
 */
public Bitmap get(final String key) {
  synchronized (mLinkedHashMap) {
    final String file = mLinkedHashMap.get(key);
    if (file != null) {
      if (BuildConfig.DEBUG) {
        Log.d(TAG, "Disk cache hit");
      }
      try {
        final RandomAccessFile raf = new RandomAccessFile(file, "r");
        return BitmapFactory.decodeFileDescriptor(raf.getFD());
      } catch (final IOException e) {
        Log.e(TAG, "getBitmap - " + e);
      }
      // return BitmapFactory.decodeFile(file);
    } else {
      final String existingFile = createFilePath(mCacheDir, key);
      if (new File(existingFile).exists()) {
        put(key, existingFile);
        if (BuildConfig.DEBUG) {
          Log.d(TAG, "Disk cache hit (existing file)");
        }
        try {
          final RandomAccessFile raf = new RandomAccessFile(existingFile, "r");
          return BitmapFactory.decodeFileDescriptor(raf.getFD());
        } catch (final IOException e) {
          Log.e(TAG, "getBitmap - " + e);
        }
        // return BitmapFactory.decodeFile(existingFile);
      }
    }
    return null;
  }
}
public static void secureDelete(File file, Random random) throws IOException {
  // FIXME somebody who understands these things should have a look at this...
  if (!file.exists()) return;
  long size = file.length();
  if (size > 0) {
    RandomAccessFile raf = null;
    try {
      System.out.println(
          "Securely deleting " + file + " which is of length " + size + " bytes...");
      raf = new RandomAccessFile(file, "rw");
      raf.seek(0);
      long count;
      byte[] buf = new byte[4096];
      // First zero it out
      count = 0;
      while (count < size) {
        int written = (int) Math.min(buf.length, size - count);
        raf.write(buf, 0, written);
        count += written;
      }
      raf.getFD().sync();
      // Then ffffff it out
      for (int i = 0; i < buf.length; i++) buf[i] = (byte) 0xFF;
      raf.seek(0);
      count = 0;
      while (count < size) {
        int written = (int) Math.min(buf.length, size - count);
        raf.write(buf, 0, written);
        count += written;
      }
      raf.getFD().sync();
      // Then random data
      random.nextBytes(buf);
      raf.seek(0);
      count = 0;
      while (count < size) {
        int written = (int) Math.min(buf.length, size - count);
        raf.write(buf, 0, written);
        count += written;
      }
      raf.getFD().sync();
      raf.seek(0);
      // Then 0's again
      for (int i = 0; i < buf.length; i++) buf[i] = 0;
      count = 0;
      while (count < size) {
        int written = (int) Math.min(buf.length, size - count);
        raf.write(buf, 0, written);
        count += written;
      }
      raf.getFD().sync();
      raf.close();
      raf = null;
    } finally {
      Closer.close(raf);
    }
  }
  if ((!file.delete()) && file.exists())
    throw new IOException("Unable to delete file " + file);
}
/**
 * Persists the bloom filter to disk.
 *
 * @throws IOException if I/O errors are encountered.
 */
public void flush() throws IOException {
  cacheLock.writeLock().lock();
  try {
    checkIfOpen();
    if (cacheDirty && unflushedChanges != null && file != null) {
      final int offset = this.metadata.getHeaderLength();
      // it's actually a disk-backed filter with changes
      if (unflushedChangeCounter.get() >= seekThreshold) {
        file.seek(offset);
        file.write(cache); // can probably be made more efficient
        file.getFD().sync();
      } else {
        for (Map.Entry<Integer, Byte> change : unflushedChanges.entrySet()) {
          file.seek(change.getKey() + offset);
          file.write(change.getValue());
        }
      }
      cacheDirty = false;
      unflushedChanges.clear();
      unflushedChangeCounter.set(0);
    }
  } finally {
    cacheLock.writeLock().unlock();
  }
}
public void write(File to) throws IOException {
  Properties props = new Properties();
  setFields(props, this);
  RandomAccessFile file = new RandomAccessFile(to, "rws");
  FileOutputStream out = null;
  try {
    file.seek(0);
    out = new FileOutputStream(file.getFD());
    /*
     * If the server is interrupted before this line, the version file
     * will remain unchanged.
     */
    props.store(out, null);
    /*
     * Now the new fields are flushed to the head of the file, but the
     * file length can still be larger than required, and therefore the
     * end of the file can contain whole or corrupted fields from its
     * old contents. If the server is interrupted here and restarted
     * later, these extra fields either should not affect server
     * behavior or should be handled by the server correctly.
     */
    file.setLength(out.getChannel().position());
  } finally {
    if (out != null) {
      out.close();
    }
    file.close();
  }
}
public void close() throws IOException {
  flush();
  if (!commitExecutor.isShutdown()) {
    commitExecutor.shutdown();
    try {
      if (!commitExecutor.awaitTermination(5, TimeUnit.MINUTES))
        throw new OException("Background data flush task can not be stopped.");
    } catch (InterruptedException e) {
      OLogManager.instance().error(this, "Data flush thread was interrupted");
      Thread.interrupted();
      throw new OException("Data flush thread was interrupted", e);
    }
  }
  synchronized (syncObject) {
    for (OFileClassic fileClassic : files.values()) {
      if (fileClassic.isOpen()) fileClassic.close();
    }
    if (nameIdMapHolder != null) {
      nameIdMapHolder.setLength(0);
      for (Map.Entry<String, Long> entry : nameIdMap.entrySet()) {
        writeNameIdEntry(new NameFileIdEntry(entry.getKey(), entry.getValue()), false);
      }
      nameIdMapHolder.getFD().sync();
      nameIdMapHolder.close();
    }
  }
}
protected void fsync(String name) throws IOException {
  File fullFile = new File(directory, name);
  boolean success = false;
  int retryCount = 0;
  IOException exc = null;
  while (!success && retryCount < 5) {
    retryCount++;
    RandomAccessFile file = null;
    try {
      try {
        file = new RandomAccessFile(fullFile, "rw");
        file.getFD().sync();
        success = true;
      } finally {
        if (file != null) file.close();
      }
    } catch (IOException ioe) {
      if (exc == null) exc = ioe;
      try {
        // Pause 5 msec
        Thread.sleep(5);
      } catch (InterruptedException ie) {
        throw new ThreadInterruptedException(ie);
      }
    }
  }
  if (!success)
    // Throw original exception
    throw exc;
}
/** Opens a file for writing. */
public TarOutputStream(final File fout, final boolean append) throws IOException {
  @SuppressWarnings("resource")
  RandomAccessFile raf = new RandomAccessFile(fout, "rw");
  final long fileSize = fout.length();
  if (append && fileSize > TarConstants.EOF_BLOCK) {
    raf.seek(fileSize - TarConstants.EOF_BLOCK);
  }
  out = new BufferedOutputStream(new FileOutputStream(raf.getFD()));
}
public synchronized InputStream getBlockInputStream(Block b, long seekOffset) throws IOException {
  File blockFile = getBlockFile(b);
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (seekOffset > 0) {
    blockInFile.seek(seekOffset);
  }
  return new FileInputStream(blockInFile.getFD());
}
public OutputStream getOutputStream() throws IOException {
  if (!PMS.get().isWindows()) {
    LOGGER.trace("Opening file " + linuxPipeName + " for writing...");
    RandomAccessFile raf = new RandomAccessFile(linuxPipeName, "rw");
    return new FileOutputStream(raf.getFD());
  }
  return mk.getWritable();
}
public InputStream getInputStream() throws IOException {
  if (!PMS.get().isWindows()) {
    LOGGER.trace("Opening file " + linuxPipeName + " for reading...");
    RandomAccessFile raf = new RandomAccessFile(linuxPipeName, "r");
    return new FileInputStream(raf.getFD());
  }
  return mk.getReadable();
}
public static void main(String[] args) throws IOException {
  RandomAccessFile raf = new RandomAccessFile("classes/test/FileDescriptors.java", "r");
  testDescriptorValidity(raf, raf.getFD());
  FileInputStream fs1 = new FileInputStream("classes/test/FileDescriptors.java");
  testDescriptorValidity(fs1, fs1.getFD());
  File temp = File.createTempFile("Doppio-FileDescriptorsTest", ".txt");
  FileOutputStream fs2 = new FileOutputStream(temp);
  testDescriptorValidity(fs2, fs2.getFD());
  temp.delete(); // we don't support deleteOnExit either
}
/** Returns handles to the block file and its metadata file */
public synchronized BlockInputStreams getTmpInputStreams(Block b, long blkOffset, long ckoff)
    throws IOException {
  DatanodeBlockInfo info = volumeMap.get(b);
  if (info == null) {
    throw new IOException("Block " + b + " does not exist in volumeMap.");
  }
  FSVolume v = info.getVolume();
  File blockFile = v.getTmpFile(b);
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = getMetaFile(blockFile, b);
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new BlockInputStreams(
      new FileInputStream(blockInFile.getFD()), new FileInputStream(metaInFile.getFD()));
}
protected void writeCorruptedData(RandomAccessFile file) throws IOException {
  final String messageForPreUpgradeVersion =
      "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
          + "of Hadoop prior to 0.13 (which are incompatible\n"
          + "with this directory layout) will fail to start.\n";
  file.seek(0);
  file.writeInt(FSConstants.LAYOUT_VERSION);
  org.apache.hadoop.io.UTF8.writeString(file, "");
  file.writeBytes(messageForPreUpgradeVersion);
  file.getFD().sync();
}
/** Synchs the file. */
public void synch() throws IOException {
  synchronized (data) {
    try {
      data.getFD().sync();
    } catch (SyncFailedException e) {
      // A SyncFailedException seems to occur on some specific OS under
      // JDK 1.4.x. We ignore the exception, which reduces the robustness
      // of the journal file for the OS where this problem occurs.
      // Unfortunately there's no sane way to handle this exception when
      // it does occur.
    }
  }
}
// creates a new BloomFilter - access via BloomFilter.createOptimal(...)
private BloomFilter(
    File f,
    int buckets,
    int hashFns,
    boolean force,
    int seekThreshold,
    BucketSize countBits,
    Allocator cacheAllocator,
    CloseCallback callback)
    throws IOException {
  this.closeCallback = callback;
  this.seekThreshold = seekThreshold;
  this.metadata = BloomMetadata.createNew(buckets, hashFns, countBits);
  hash = new RepeatedMurmurHash(hashFns, this.metadata.getBucketCount());
  // creating a new filter - so I can just be lazy and start it zero'd
  cache = cacheAllocator.apply(this.metadata.getTotalLength() - this.metadata.getHeaderLength());
  cacheDirty = true;
  open = true;
  if (f != null) {
    if (f.exists()) {
      if (force) {
        if (!f.delete()) {
          throw new IOException("Couldn't delete old file at " + f.getAbsolutePath());
        }
      } else {
        throw new IllegalArgumentException(
            "Can't create a new BloomFilter at "
                + f.getAbsolutePath()
                + " since it already exists");
      }
    }
    file = new RandomAccessFile(f, "rw");
    this.metadata.writeToFile(file);
    file.setLength(metadata.getTotalLength());
    file.getFD().sync();
    unflushedChanges = new ConcurrentSkipListMap<Integer, Byte>();
    if (f.length() != metadata.getTotalLength()) {
      throw new RuntimeException(
          "Bad size - expected " + metadata.getTotalLength() + " but got " + f.length());
    }
  } else {
    // don't bother keeping track of unflushed changes if this is memory only
    unflushedChanges = null;
    file = null;
  }
}
private void writeNameIdEntry(NameFileIdEntry nameFileIdEntry, boolean sync) throws IOException {
  nameIdMapHolder.seek(nameIdMapHolder.length());
  final int nameSize = OStringSerializer.INSTANCE.getObjectSize(nameFileIdEntry.name);
  byte[] serializedName = new byte[nameSize];
  OStringSerializer.INSTANCE.serialize(nameFileIdEntry.name, serializedName, 0);
  nameIdMapHolder.writeInt(nameSize);
  nameIdMapHolder.write(serializedName);
  nameIdMapHolder.writeLong(nameFileIdEntry.fileId);
  if (sync) nameIdMapHolder.getFD().sync();
}
public void playSound(int soundId, int grade, int subject) {
  if (mPlayer == null) {
    return;
  }
  int soundIndex;
  mPlayer.reset();
  switch (soundId) {
    case GUIDE:
      soundIndex = (grade + 1) * 100 + subject + 1;
      break;
    case RIGHT:
      soundIndex = (int) (11 + Math.random() * 8);
      break;
    case WRONG:
      soundIndex = (int) (31 + Math.random() * 5);
      break;
    case EXAMPLE:
    case TEST:
    case ANSWER_IS_A:
    case ANSWER_IS_B:
    case ANSWER_IS_C:
    case ANSWER_IS_D:
    case RESULT_00:
    case RESULT_20:
    case RESULT_40:
    case RESULT_60:
    case RESULT_80:
    case RESULT_100:
      soundIndex = soundId;
      break;
    default:
      soundIndex = soundId;
  }
  Sound sound = mapping.get(soundIndex);
  try {
    if (sound != null) {
      mPlayer.setDataSource(mSound.getFD(), sound.mOffset, sound.mLength);
      mPlayer.prepare();
      mPlayer.start();
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
/**
 * Prepares the recorder to begin capturing and encoding data. This method must be called after
 * setting up the desired audio and video sources, encoders, file format, etc., but before
 * start().
 *
 * @throws IllegalStateException if it is called after start() or before setOutputFormat().
 * @throws IOException if prepare fails otherwise.
 */
public void prepare() throws IllegalStateException, IOException {
  if (mPath != null) {
    RandomAccessFile file = new RandomAccessFile(mPath, "rws");
    try {
      _setOutputFile(file.getFD(), 0, 0);
    } finally {
      file.close();
    }
  } else if (mFd != null) {
    _setOutputFile(mFd, 0, 0);
  } else {
    throw new IOException("No valid output file");
  }
  _prepare();
}
/**
 * Writes a bitmap to a file. Call {@link DiskLruCache#setCompressParams(CompressFormat, int)}
 * first to set the target bitmap compression format and quality.
 *
 * @param bitmap the bitmap to persist
 * @param file the destination file path
 * @return true if the bitmap was compressed and written successfully
 */
private boolean writeBitmapToFile(final Bitmap bitmap, final String file)
    throws IOException, FileNotFoundException {
  if (bitmap == null) return false;
  OutputStream out = null;
  try {
    final RandomAccessFile raf = new RandomAccessFile(file, "rw");
    out = new FileOutputStream(raf.getFD());
    // out = new BufferedOutputStream(new FileOutputStream(file),
    //     ImageLoaderUtils.IO_BUFFER_SIZE);
    return bitmap.compress(mCompressFormat, mCompressQuality, out);
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
public void read(File from) throws IOException {
  RandomAccessFile file = new RandomAccessFile(from, "rws");
  FileInputStream in = null;
  try {
    in = new FileInputStream(file.getFD());
    file.seek(0);
    Properties props = new Properties();
    props.load(in);
    getFields(props, this);
  } finally {
    if (in != null) {
      in.close();
    }
    file.close();
  }
}
public void finish(UpdateLog.SyncLevel syncLevel) {
  if (syncLevel == UpdateLog.SyncLevel.NONE) return;
  try {
    synchronized (this) {
      fos.flushBuffer();
    }
    if (syncLevel == UpdateLog.SyncLevel.FSYNC) {
      // Since fsync is outside of synchronized block, we can end up with a partial
      // last record on power failure (which is OK, and does not represent an error...
      // we just need to be aware of it when reading).
      raf.getFD().sync();
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
  }
}
public synchronized void flush(List<Update> updates) {
  Assert.state(storedFile != null, "The FileStorage is not initialized");
  try {
    for (Update update : updates) {
      ensureCapacity(update.index * BYTES_IN_LONG + BYTES_IN_LONG);
      if (activateMemoryMappedBuffers) {
        buffer.putLong(update.index * BYTES_IN_LONG, update.value);
      } else {
        storedFile.seek(update.index * BYTES_IN_LONG);
        storedFile.writeLong(update.value);
      }
    }
    if (activateMemoryMappedBuffers) {
      buffer.force();
    }
    storedFile.getFD().sync();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
public long open(String path, String mode) throws IOException {
  if (!("r".equals(mode) || "w".equals(mode) || "rw".equals(mode))) {
    throw new IllegalArgumentException();
  }
  try {
    FileDescriptor fileFD;
    File file;
    if (mode.equals("w")) {
      FileOutputStream f = new FileOutputStream(path);
      fileFD = f.getFD();
      file = new OutputStreamFile(f, kem);
    } else {
      RandomAccessFile f = new RandomAccessFile(fileUtil.resolveWorkingDirectory(path), mode);
      fileFD = f.getFD();
      file = new RandomAccessFileFile(f);
    }
    long fd = fdCounter++;
    descriptors.put(fd, fileFD);
    files.put(fileFD, file);
    return fd;
  } catch (FileNotFoundException e) {
    try {
      processFileNotFoundException(e);
    } catch (IOException ioe) {
      if (ioe.getMessage().equals("EISDIR") && mode.equals("r")) {
        // man 2 open says you can open a directory in readonly mode with open, but
        // java has no support for it. So we throw an UnsupportedOperationException
        // instead of failing with EISDIR
        kem.registerInternalWarning(
            "Unsupported file system behavior: tried to open a directory."
                + " If you are interested in this behavior, please file an issue on github.");
        throw new UnsupportedOperationException();
      }
      throw ioe;
    }
    throw e; // unreachable
  }
}
public static void secureDelete(File file) throws IOException {
  // FIXME somebody who understands these things should have a look at this...
  if (!file.exists()) return;
  long size = file.length();
  if (size > 0) {
    RandomAccessFile raf = null;
    try {
      System.out.println(
          "Securely deleting " + file + " which is of length " + size + " bytes...");
      raf = new RandomAccessFile(file, "rw");
      long count;
      // Random data first.
      raf.seek(0);
      fill(new RandomAccessFileOutputStream(raf), size);
      raf.getFD().sync();
      raf.close();
      raf = null;
    } finally {
      Closer.close(raf);
    }
  }
  if ((!file.delete()) && file.exists())
    throw new IOException("Unable to delete file " + file);
}
private void testFile(DataOutputStream out) throws IOException {
  File file = new File("test.txt");
  if (file.exists()) {
    file.delete();
  }
  RandomAccessFile write = new RandomAccessFile(file, "rws");
  // RandomAccessFile write = new RandomAccessFile(file, "rwd");
  int fileSize = 10 * 1024 * 1024;
  FileUtils.setLength(write, fileSize);
  write.seek(0);
  int i = 0;
  FileDescriptor fd = write.getFD();
  while (true) {
    if (write.getFilePointer() >= fileSize) {
      break;
    }
    write.writeBytes(i + "\r\n");
    fd.sync();
    out.writeInt(i);
    out.flush();
    i++;
  }
  write.close();
}
public static void save(RecentGame game, Summoner summoner) throws IOException {
  ObjectOutputStream objectOutputStream = null;
  RandomAccessFile raf = null;
  try {
    // TODO: write to XML or something instead of Java's silly format
    String gameFileString =
        String.format(
            gameFileFormat,
            ServerInfo.currentServerInfo.region,
            summoner.getSummonerID(),
            game.getGameID());
    raf = new RandomAccessFile(FileSystem.getFile(gameFileString), "rw");
    FileOutputStream fos = new FileOutputStream(raf.getFD());
    objectOutputStream = new ObjectOutputStream(fos);
    objectOutputStream.writeObject(game);
  } finally {
    if (objectOutputStream != null) {
      objectOutputStream.close();
    }
    if (raf != null) {
      raf.close();
    }
  }
}
public static void main(String[] args) {
  if (args.length != 1) {
    System.err.println("Usage: ZIPLINES_PATH");
    System.exit(1);
  }
  File f = new File(args[0]);
  long size = f.length();
  long numBlocks = (long) (size / ZiplinedBlock.BLOCK_SIZE);
  long size2 = numBlocks * ZiplinedBlock.BLOCK_SIZE;
  if (size != size2) {
    System.err.println(
        "File size of " + args[0] + " is not a multiple" + " of " + ZiplinedBlock.BLOCK_SIZE);
  }
  try {
    RandomAccessFile raf = new RandomAccessFile(f, "r");
    for (int i = 0; i < numBlocks; i++) {
      long offset = i * ZiplinedBlock.BLOCK_SIZE;
      raf.seek(offset);
      // BufferedReader br = new BufferedReader(new InputStreamReader(
      //     new GZIPInputStream(new FileInputStream(raf.getFD())), ByteOp.UTF8));
      BufferedReader br =
          new BufferedReader(
              new InputStreamReader(
                  new OpenJDK7GZIPInputStream(new FileInputStream(raf.getFD())), ByteOp.UTF8));
      String line = br.readLine();
      if (line == null) {
        System.err.println("Bad block at " + offset + " in " + args[0]);
        System.exit(1);
      }
      System.out.println(args[0] + " " + offset + " " + line);
    }
  } catch (IOException e) {
    e.printStackTrace();
    System.exit(1);
  }
}
public void flush() throws IOException {
  file.seek(bufferStart);
  file.write(buffer, 0, dataSize);
  bufferModified = false;
  file.getFD().sync();
}
public void test() throws IOException {
  createFile();
  // test exception if file doesn't exist
  try {
    LazyFileInputStream in = new LazyFileInputStream(file.getAbsolutePath() + "XX");
    in.close();
    fail();
  } catch (IOException e) {
    // expected
  }
  // test open / close (without reading)
  LazyFileInputStream in = new LazyFileInputStream(file);
  in.close();
  // test reading too much and closing too much
  in = new LazyFileInputStream(file);
  assertEquals(0, in.read());
  assertEquals(-1, in.read());
  assertEquals(-1, in.read());
  assertEquals(-1, in.read());
  in.close();
  in.close();
  in.close();
  assertEquals(-1, in.read());
  // test with file name
  in = new LazyFileInputStream(file.getAbsolutePath());
  assertEquals(1, in.available());
  assertEquals(0, in.read());
  assertEquals(0, in.available());
  assertEquals(-1, in.read());
  assertEquals(0, in.available());
  in.close();
  // test markSupported, mark, and reset
  in = new LazyFileInputStream(file);
  assertFalse(in.markSupported());
  in.mark(1);
  assertEquals(0, in.read());
  try {
    in.reset();
    fail();
  } catch (IOException e) {
    // expected
  }
  assertEquals(-1, in.read());
  in.close();
  // test read(byte[])
  in = new LazyFileInputStream(file);
  byte[] test = new byte[2];
  assertEquals(1, in.read(test));
  in.close();
  // test read(byte[],int,int)
  in = new LazyFileInputStream(file);
  assertEquals(1, in.read(test, 0, 2));
  in.close();
  // test skip
  in = new LazyFileInputStream(file);
  assertEquals(2, in.skip(2));
  assertEquals(-1, in.read(test));
  assertEquals(0, in.skip(2));
  in.close();
  // test with the file descriptor
  RandomAccessFile ra = new RandomAccessFile(file, "r");
  in = new LazyFileInputStream(ra.getFD());
  assertEquals(0, in.read());
  assertEquals(-1, in.read());
  in.close();
  ra.close();
  // test that the file is not opened before reading
  in = new LazyFileInputStream(file);
  // this should fail in Windows if the file was opened
  file.delete();
  createFile();
  // test that the file is closed after reading the last byte
  in = new LazyFileInputStream(file);
  assertEquals(0, in.read());
  assertEquals(-1, in.read());
  // this should fail in Windows if the file was opened
  file.delete();
}