public static void main(String args[]) throws Exception { String inputFile = "samplein.txt"; String outputFile = "sampleout.txt"; RandomAccessFile inf = new RandomAccessFile(inputFile, "r"); RandomAccessFile outf = new RandomAccessFile(outputFile, "rw"); long inputLength = new File(inputFile).length(); FileChannel inc = inf.getChannel(); FileChannel outc = outf.getChannel(); MappedByteBuffer inputData = inc.map(FileChannel.MapMode.READ_ONLY, 0, inputLength); Charset latin1 = Charset.forName("ISO-8859-1"); CharsetDecoder decoder = latin1.newDecoder(); CharsetEncoder encoder = latin1.newEncoder(); CharBuffer cb = decoder.decode(inputData); // Process char data here ByteBuffer outputData = encoder.encode(cb); outc.write(outputData); inf.close(); outf.close(); }
Cube(int size) { this.lsize = this.isize = size; this.lsize2 = this.isize2 = size * size; this.lsize3 = lsize2 * lsize; System.out.println(Integer.MAX_VALUE / 4 + " = " + MAX_ITEMS); System.out.println("cube size=" + lsize3 + " items"); System.out.println((lsize2 * size * 4L) + " " + lsize2 * size / MAX_ITEMS); nbOfMappeFiles = (int) Math.max(1, lsize2 * size / MAX_ITEMS + 1); System.out.println("nb of mapped files=" + nbOfMappeFiles); try { if (nbOfMappeFiles > 1) { byteBuffers = new ByteBuffer[size]; for (int i = 0; i < nbOfMappeFiles; i++) { RandomAccessFile file = new RandomAccessFile("cube" + i + ".dat", "rw"); byteBuffers[i] = file.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, Integer.MAX_VALUE); } } else { RandomAccessFile file = new RandomAccessFile("cube0.dat", "rw"); byteBuffer = file.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, Integer.MAX_VALUE); } } catch (IOException ex) { Logger.getLogger(Cube.class.getName()).log(Level.SEVERE, null, ex); } }
/**
 * Writes the given tag into the audio file wrapped by {@code raf}: overwrites an
 * existing ID3v2 tag in place when the new tag fits, otherwise rebuilds the file with
 * a freshly created tag plus default padding.
 *
 * @param tag the tag data to write
 * @param raf the target audio file
 * @param tempRaf scratch file used when the audio file is too large to buffer in memory
 * @throws CannotWriteException if an existing ID3v2 tag variant cannot be overwritten
 * @throws IOException on any I/O failure
 */
public void write(Tag tag, RandomAccessFile raf, RandomAccessFile tempRaf) throws CannotWriteException, IOException {
  FileChannel fc = raf.getChannel();
  int oldTagSize = 0;
  if (tagExists(fc)) {
    // read the length
    if (!canOverwrite(raf))
      throw new CannotWriteException("Overwritting of this kind of ID3v2 tag not supported yet");
    // The tag size lives at bytes 6-9 as a 28-bit "synchsafe" integer (7 bits per byte).
    fc.position(6);
    ByteBuffer buf = ByteBuffer.allocate(4);
    fc.read(buf);
    oldTagSize = (buf.get(0) & 0xFF) << 21;
    oldTagSize += (buf.get(1) & 0xFF) << 14;
    oldTagSize += (buf.get(2) & 0xFF) << 7;
    oldTagSize += buf.get(3) & 0xFF;
    oldTagSize += 10; // add the 10-byte ID3v2 header itself
    int newTagSize = tc.getTagLength(tag);
    if (oldTagSize >= newTagSize) {
      // New tag fits inside the old one: overwrite in place, padding the difference.
      fc.position(0);
      fc.write(tc.convert(tag, oldTagSize - newTagSize));
      // ID3v2 Tag Written
      return;
    }
  }
  // New tag is larger (or none existed): rebuild with default padding.
  fc.position(oldTagSize); // skip the old tag (0 when there was none)
  if (fc.size() > 15 * 1024 * 1024) {
    // Large file: stream the audio data through the temp file instead of buffering it.
    FileChannel tempFC = tempRaf.getChannel();
    tempFC.position(0);
    tempFC.write(tc.convert(tag, Id3v2TagCreator.DEFAULT_PADDING));
    tempFC.transferFrom(fc, tempFC.position(), fc.size() - oldTagSize);
    // NOTE(review): only fc is closed here; presumably the caller swaps the temp file
    // in for the original afterwards - confirm against call sites.
    fc.close();
  } else {
    // Small file: read the remaining content into memory, then write tag + audio back.
    ByteBuffer[] content = new ByteBuffer[2];
    content[1] = ByteBuffer.allocate((int) fc.size());
    fc.read(content[1]);
    content[1].rewind();
    content[0] = tc.convert(tag, Id3v2TagCreator.DEFAULT_PADDING);
    fc.position(0);
    fc.write(content);
  }
}
/**
 * Checks whether the single-instance lock file is held.
 *
 * @return true when the lock file already existed and this process could still lock it
 *         (treated as "locked - refuse to start"); false when the lock file was freshly
 *         created and claimed by this process
 * @throws IOException on file access failure
 */
public boolean validateFile() throws IOException {
  fileUrl = System.getProperty("user.dir") + "/SpiderRun.lock";
  File myfilelock = new File(fileUrl);
  RandomAccessFile raf = null;
  FileChannel fc = null;
  if (!myfilelock.exists()) {
    // Lock file absent: create it, then take the lock.
    // NOTE(review): raf/fc are never closed, presumably so the JVM keeps holding the
    // lock for its lifetime as a single-instance guard - confirm this is intended.
    raf = new RandomAccessFile(myfilelock, "rw");
    fc = raf.getChannel();
    FileLock fl = fc.tryLock();
    if (fl != null && fl.isValid()) {
      return false;
    }
  } else {
    // Lock file exists: a previous run left it behind.
    // NOTE(review): this branch reports "locked" even when tryLock SUCCEEDS, i.e. a
    // leftover lock file alone is treated as a conflict - verify that is the intent.
    raf = new RandomAccessFile(myfilelock, "rw");
    fc = raf.getChannel();
    FileLock fl = fc.tryLock();
    if (fl != null && fl.isValid()) {
      System.err.println(fileUrl + "文件已被锁,请删除后,再启动该进程!");
      return true;
    }
  }
  return false;
}
/**
 * Memory-copy benchmark over this worker's partition [mLeft, mRight): each iteration
 * either writes (mOneToMany) or reads (otherwise) BLOCKS_PER_FILE blocks against a
 * destination buffer - a direct in-memory buffer when mMemoryOnly, or a freshly mapped
 * file otherwise. A sampled byte per iteration is folded into Results[mWorkerId] so the
 * copies cannot be optimized away.
 *
 * @throws IOException on file open/mapping failure
 */
public void memoryCopyParition() throws IOException {
  if (DEBUG_MODE) {
    mBuf.flip();
    CommonUtils.printByteBuffer(LOG, mBuf);
  }
  mBuf.flip();
  long sum = 0;
  String str = "th " + mMsg + " @ Worker ";
  if (mOneToMany) {
    // Write path: copy mBuf into the destination BLOCKS_PER_FILE times per iteration.
    ByteBuffer dst = null;
    RandomAccessFile file = null;
    if (mMemoryOnly) {
      dst = ByteBuffer.allocateDirect(FILE_BYTES);
    }
    for (int times = mLeft; times < mRight; times++) {
      long startTimeMs = System.currentTimeMillis();
      if (!mMemoryOnly) {
        // Map a fresh FILE_BYTES region of this worker's file for each iteration.
        file = new RandomAccessFile(FOLDER + (mWorkerId + BASE_FILE_NUMBER), "rw");
        dst = file.getChannel().map(MapMode.READ_WRITE, 0, FILE_BYTES);
      }
      dst.order(ByteOrder.nativeOrder());
      for (int k = 0; k < BLOCKS_PER_FILE; k++) {
        // Vary the first byte so each block's content depends on k and the worker id.
        mBuf.array()[0] = (byte) (k + mWorkerId);
        dst.put(mBuf.array());
      }
      dst.clear();
      sum += dst.get(times); // sample one byte to defeat dead-code elimination
      dst.clear();
      if (!mMemoryOnly) {
        file.close();
      }
      logPerIteration(startTimeMs, times, str, mWorkerId);
    }
  } else {
    // Read path: pull BLOCKS_PER_FILE blocks from the source into mBuf's backing array.
    ByteBuffer dst = null;
    RandomAccessFile file = null;
    if (mMemoryOnly) {
      dst = ByteBuffer.allocateDirect(FILE_BYTES);
    }
    for (int times = mLeft; times < mRight; times++) {
      long startTimeMs = System.currentTimeMillis();
      if (!mMemoryOnly) {
        file = new RandomAccessFile(FOLDER + (mWorkerId + BASE_FILE_NUMBER), "rw");
        dst = file.getChannel().map(MapMode.READ_WRITE, 0, FILE_BYTES);
      }
      dst.order(ByteOrder.nativeOrder());
      for (int k = 0; k < BLOCKS_PER_FILE; k++) {
        dst.get(mBuf.array());
      }
      sum += mBuf.get(times % 16); // sample a byte from what was read
      dst.clear();
      if (!mMemoryOnly) {
        file.close();
      }
      logPerIteration(startTimeMs, times, str, mWorkerId);
    }
  }
  Results[mWorkerId] = sum;
}
/**
 * Reassembles a Turtle document from sorted "part*" chunk files in {@code partFolder}:
 * the first part is copied whole (keeping its prefix declarations), subsequent parts are
 * appended starting after their own prefix block. The result is built in a temp file and
 * atomically moved to {@code reconstructed}; the part folder is deleted afterwards.
 *
 * @param partFolder directory containing the "part*" files
 * @param reconstructed destination for the merged document
 * @throws IOException if partFolder is not a directory or any transfer fails
 */
public static void reconstructTurtle(File partFolder, File reconstructed) throws IOException {
  Path tmpOut = Files.createTempFile(partFolder.toPath(), "reconstr", ".tmp");
  FileOutputStream dstOut = new FileOutputStream(tmpOut.toFile());
  FileChannel dstOutChannel = dstOut.getChannel();
  try {
    if (!Files.isDirectory(partFolder.toPath()))
      throw new IOException("Not a directory: " + partFolder);
    File[] fileList = FileUtils.listFiles(partFolder, new PrefixFileFilter("part"), TrueFileFilter.TRUE)
        .toArray(new File[0]);
    Arrays.sort(fileList);
    // FIX: each RandomAccessFile is now closed in a finally block so a failed
    // transfer no longer leaks the handle (the original closed only on success).
    RandomAccessFile inputFile = new RandomAccessFile(fileList[0], "r");
    try {
      inputFile.getChannel().transferTo(0, inputFile.length(), dstOutChannel);
    } finally {
      inputFile.close();
    }
    for (int i = 1; i < fileList.length; i++) {
      inputFile = new RandomAccessFile(fileList[i], "r");
      try {
        long lastPrefix = findTurtlePrefixEnd(inputFile);
        inputFile.getChannel().transferTo(lastPrefix, inputFile.length() - lastPrefix, dstOutChannel);
      } finally {
        inputFile.close();
      }
    }
  } finally {
    dstOut.close();
  }
  Files.move(
      tmpOut,
      reconstructed.toPath(),
      StandardCopyOption.ATOMIC_MOVE,
      StandardCopyOption.REPLACE_EXISTING);
  FileUtils.deleteQuietly(tmpOut.toFile()); // no-op if the move succeeded
  FileUtils.deleteQuietly(partFolder);
}
/** * Seek for box with the specified id starting from the current location of filepointer, * * <p>Note it wont find the box if it is contained with a level below the current level, nor if we * are at a parent atom that also contains data and we havent yet processed the data. It will work * if we are at the start of a child box even if it not the required box as long as the box we are * looking for is the same level (or the level above in some cases). * * @param raf * @param id * @throws java.io.IOException */ public static Mp4BoxHeader seekWithinLevel(RandomAccessFile raf, String id) throws IOException { logger.finer("Started searching for:" + id + " in file at:" + raf.getChannel().position()); Mp4BoxHeader boxHeader = new Mp4BoxHeader(); ByteBuffer headerBuffer = ByteBuffer.allocate(HEADER_LENGTH); int bytesRead = raf.getChannel().read(headerBuffer); if (bytesRead != HEADER_LENGTH) { return null; } headerBuffer.rewind(); boxHeader.update(headerBuffer); while (!boxHeader.getId().equals(id)) { logger.finer("Still searching for:" + id + " in file at:" + raf.getChannel().position()); // Something gone wrong probably not at the start of an atom so return null; if (boxHeader.getLength() < Mp4BoxHeader.HEADER_LENGTH) { return null; } int noOfBytesSkipped = raf.skipBytes(boxHeader.getDataLength()); logger.finer("Skipped:" + noOfBytesSkipped); if (noOfBytesSkipped < boxHeader.getDataLength()) { return null; } headerBuffer.rewind(); bytesRead = raf.getChannel().read(headerBuffer); logger.finer("Header Bytes Read:" + bytesRead); headerBuffer.rewind(); if (bytesRead == Mp4BoxHeader.HEADER_LENGTH) { boxHeader.update(headerBuffer); } else { return null; } } return boxHeader; }
/**
 * Reads one cache file out of the store. The 6-byte index entry at offset file*6 gives
 * the file's length and its first sector in the main store; sectors (520 bytes: 8-byte
 * header + up to 512 data bytes) are then chained via the 3-byte "next sector" pointer
 * in each header.
 *
 * @param file the file id we request
 * @return the decoded CacheFile, or null on any failure
 */
public CacheFile getFile(int file) {
  try {
    // Index entry: 3-byte length followed by 3-byte starting sector number.
    ByteBuffer fileBuffer = containerFile.getChannel().map(FileChannel.MapMode.READ_ONLY, file * 6, 6);
    int length = ((fileBuffer.get() & 0xFF) << 16) | ((fileBuffer.get() & 0xFF) << 8) | (fileBuffer.get() & 0xFF);
    int position = ((fileBuffer.get() & 0xFF) << 16) | ((fileBuffer.get() & 0xFF) << 8) | (fileBuffer.get() & 0xFF);
    if (length == 0) {
      throw new IOException("Empty file " + file);
    }
    ByteBuffer buffer = ByteBuffer.allocate(length);
    int remaining = length;
    int offset = 0;
    while (remaining > 0) {
      // Each sector carries at most 512 data bytes.
      int amount = remaining;
      if (amount > 512) {
        amount = 512;
      }
      // NOTE(review): this maps remaining + 8 bytes although only 8 + amount are
      // consumed per sector - appears to over-map harmlessly; confirm intended.
      ByteBuffer mainBuffer = mainFileStore.getChannel().map(FileChannel.MapMode.READ_ONLY, position * 520, remaining + 8);
      // Sector header: file id (2), chunk offset (2), next sector (3), container (1).
      int expectedFile = mainBuffer.getShort() & 0xFFFF;
      int expectedOffset = mainBuffer.getShort() & 0xFFFF;
      position = ((mainBuffer.get() & 0xFF) << 16) | ((mainBuffer.get() & 0xFF) << 8) | (mainBuffer.get() & 0xFF);
      int expectedContainer = mainBuffer.get() & 0xff;
      if (expectedFile != file) {
        throw new IOException("Unexpected file!");
      }
      if (expectedOffset != offset) {
        throw new IOException("Unexpected offset!");
      }
      if (container != expectedContainer) {
        throw new IOException("Unexpected container!");
      }
      byte[] fileData = new byte[amount];
      mainBuffer.get(fileData, 0, amount);
      buffer.put(fileData);
      remaining -= amount;
      offset++;
    }
    buffer.flip();
    // Payload prologue: 1-byte compression type then 4-byte uncompressed length.
    int compression = buffer.get() & 0xff;
    int fileLength = buffer.getInt();
    return new CacheFile(container, file, compression, fileLength, buffer);
  } catch (Exception e) {
    // NOTE(review): every failure (including the IOExceptions thrown above) is
    // swallowed and reported as null - callers cannot tell "missing" from "corrupt".
    return null;
  }
}
/**
 * Merges the (sorted) new hits into the existing on-disk sorted arrays for this
 * chromosome: positions, weights and length-and-strand (LAS) values are written to
 * parallel ".tmp" files via memory mapping, then the temp files are renamed over
 * the originals.
 *
 * @param hits new hits, sorted by position
 * @param prefix file-name prefix identifying the dataset
 * @param chrom chromosome index
 * @throws IOException on file or mapping failure
 */
private void merge(SingleHit[] hits, String prefix, int chrom) throws IOException {
  String postmp = getPositionsFname(prefix, chrom) + ".tmp";
  String weightstmp = getWeightsFname(prefix, chrom) + ".tmp";
  String lastmp = getLaSFname(prefix, chrom) + ".tmp";
  RandomAccessFile positionsRAF = new RandomAccessFile(postmp, "rw");
  RandomAccessFile weightsRAF = new RandomAccessFile(weightstmp, "rw");
  RandomAccessFile lasRAF = new RandomAccessFile(lastmp, "rw");
  // Output holds every old entry plus every new hit; 4 bytes per entry in each file.
  int newsize = getPositionsBuffer().limit() + hits.length;
  IntBP posfile = new IntBP(positionsRAF.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, newsize * 4));
  FloatBP weightfile = new FloatBP(weightsRAF.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, newsize * 4));
  IntBP lasfile = new IntBP(lasRAF.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, newsize * 4));
  int oldp = 0;
  int newp = 0;
  int pos = 0;
  IntBP oldpositions = getPositionsBuffer();
  FloatBP oldweights = getWeightsBuffer();
  IntBP oldlas = getLASBuffer();
  // Classic two-way merge: drain whichever side has the smaller position next.
  while (oldp < oldpositions.limit() || newp < hits.length) {
    while (newp < hits.length && (oldp == oldpositions.limit() || hits[newp].pos <= oldpositions.get(oldp))) {
      posfile.put(pos, hits[newp].pos);
      weightfile.put(pos, hits[newp].weight);
      lasfile.put(pos, Hits.makeLAS(hits[newp].length, hits[newp].strand));
      newp++;
      pos++;
    }
    while (oldp < oldpositions.limit() && (newp == hits.length || oldpositions.get(oldp) <= hits[newp].pos)) {
      posfile.put(pos, oldpositions.get(oldp));
      weightfile.put(pos, oldweights.get(oldp));
      lasfile.put(pos, oldlas.get(oldp));
      oldp++;
      pos++;
    }
  }
  // Drop the mapped-buffer references before closing and renaming the files.
  posfile = null;
  weightfile = null;
  lasfile = null;
  oldpositions = null;
  oldweights = null;
  oldlas = null;
  positionsRAF.close();
  weightsRAF.close();
  lasRAF.close();
  /* ideally this part with the renames would be atomic... */
  (new File(postmp)).renameTo(new File(getPositionsFname(prefix, chrom)));
  (new File(weightstmp)).renameTo(new File(getWeightsFname(prefix, chrom)));
  (new File(lastmp)).renameTo(new File(getLaSFname(prefix, chrom)));
}
/**
 * Demonstrates FileChannel.transferFrom: pulls the entire contents of from.log into
 * to.log starting at position 0.
 */
@Test
public void testChannelTransferFrom() throws Exception {
  // FIX: try-with-resources closes both files; the original leaked both handles.
  try (RandomAccessFile fromFile = new RandomAccessFile("g:/u2/from.log", "rw");
      RandomAccessFile toFile = new RandomAccessFile("g:/u2/to.log", "rw")) {
    FileChannel fromChannel = fromFile.getChannel();
    FileChannel toChannel = toFile.getChannel();
    long position = 0;
    long count = fromChannel.size();
    // transferFrom may transfer fewer bytes than requested, never more.
    long result = toChannel.transferFrom(fromChannel, position, count);
  }
}
private void forceSync(@NotNull final RandomAccessFile file) { try { final FileChannel channel = file.getChannel(); channel.force(false); } catch (ClosedChannelException e) { // ignore } catch (IOException ioe) { if (file.getChannel().isOpen()) { throw new ExodusException(ioe); } } }
/**
 * Demonstrates FileChannel.transferTo: pushes the entire contents of from.log into
 * to.log and prints how many bytes were actually transferred.
 */
@Test
public void testChannelTransferTo() throws Exception {
  // FIX: try-with-resources closes both files; the original leaked both handles.
  try (RandomAccessFile fromFile = new RandomAccessFile("g:/u2/from.log", "rw");
      RandomAccessFile toFile = new RandomAccessFile("g:/u2/to.log", "rw")) {
    FileChannel fromChannel = fromFile.getChannel();
    FileChannel toChannel = toFile.getChannel();
    long position = 0;
    long count = fromChannel.size();
    // transferTo may transfer fewer bytes than requested, never more.
    long result = fromChannel.transferTo(position, count, toChannel);
    System.out.println(result);
  }
}
@FixFor("MODE-1358") @Test public void shouldCopyFilesUsingStreams() throws Exception { // Copy a large file into a temporary file ... File tempFile = File.createTempFile("copytest", "pdf"); RandomAccessFile destinationRaf = null; RandomAccessFile originalRaf = null; try { URL sourceUrl = getClass().getResource("/docs/postgresql-8.4.1-US.pdf"); assertThat(sourceUrl, is(notNullValue())); File sourceFile = new File(sourceUrl.toURI()); assertThat(sourceFile.exists(), is(true)); assertThat(sourceFile.canRead(), is(true)); assertThat(sourceFile.isFile(), is(true)); boolean useBufferedStream = true; final int bufferSize = AbstractBinaryStore.bestBufferSize(sourceFile.length()); destinationRaf = new RandomAccessFile(tempFile, "rw"); originalRaf = new RandomAccessFile(sourceFile, "r"); FileChannel destinationChannel = destinationRaf.getChannel(); OutputStream output = Channels.newOutputStream(destinationChannel); if (useBufferedStream) output = new BufferedOutputStream(output, bufferSize); // Create an input stream to the original file ... FileChannel originalChannel = originalRaf.getChannel(); InputStream input = Channels.newInputStream(originalChannel); if (useBufferedStream) input = new BufferedInputStream(input, bufferSize); // Copy the content ... Stopwatch sw = new Stopwatch(); sw.start(); IoUtil.write(input, output, bufferSize); sw.stop(); System.out.println( "Time to copy \"" + sourceFile.getName() + "\" (" + sourceFile.length() + " bytes): " + sw.getTotalDuration()); } finally { tempFile.delete(); if (destinationRaf != null) destinationRaf.close(); if (originalRaf != null) originalRaf.close(); } }
/**
 * Sends a file over the channel: zero-copy FileRegion transfer on plain connections,
 * chunked writes when SSL is in use (zero-copy is not possible through an SSL engine).
 * The file handle is closed asynchronously when the write completes.
 *
 * @param file the file to send
 * @return the write future, or null if opening/queueing the file failed
 */
protected ChannelFuture sendFile(File file) {
  final RandomAccessFile raf;
  try {
    raf = new RandomAccessFile(file, "r");
    long fileLength = file.length();
    // Write the content.
    ChannelFuture writeFuture;
    if (isSSL()) {
      // Cannot use zero-copy with HTTPS.
      writeFuture = channel.write(new ChunkedFile(raf, 0, fileLength, 8192));
    } else {
      // No encryption - use zero-copy.
      final FileRegion region = new DefaultFileRegion(raf.getChannel(), 0, fileLength);
      writeFuture = channel.write(region);
    }
    // Close the file once the (asynchronous) write finishes, success or failure.
    writeFuture.addListener(
        new ChannelFutureListener() {
          public void operationComplete(ChannelFuture future) throws Exception {
            raf.close();
          }
        });
    return writeFuture;
  } catch (IOException e) {
    handleException(e);
    return null;
  }
}
/**
 * Replays every data file found by scanPath() from the persisted offset onward,
 * decoding length-prefixed messages and printing each one. Only the first file starts
 * at the saved offset; subsequent files are read from the beginning.
 *
 * @throws IOException on file access failure
 */
private static void parseData() throws IOException {
  TreeSet<String> fs = scanPath();
  parseMeta();
  if (fs.size() == 0) return;
  // If the meta file does not refer to the first data file, restart from offset 0.
  String absolutePath = new File(fs.first()).getCanonicalPath();
  String absolutePath2 = new File(name).getCanonicalPath();
  if (!absolutePath.equals(absolutePath2)) {
    System.out.println("meta file name not match first file, use first file");
    pos = 0;
  }
  for (String filename : fs) {
    RandomAccessFile f = new RandomAccessFile(filename, "r");
    MappedByteBuffer map = f.getChannel().map(MapMode.READ_ONLY, 0, f.length());
    // NOTE(review): the (int) cast silently truncates pos for files >= 2 GiB -
    // confirm data files are bounded below that.
    map.position((int) pos);
    while (map.hasRemaining()) {
      // Record layout: 4-byte length prefix followed by the compressed payload.
      int size = map.getInt();
      byte[] c = new byte[size];
      map.get(c);
      Message m = MessageFactory.getInstance().createMessageFrom(c);
      m.decompress();
      System.out.println("get content: " + asString(m));
    }
    map.clear();
    map = null; // drop the mapping reference before closing the file
    f.close();
    pos = 0; // only the first file resumes from a saved offset
  }
}
/**
 * Creates a CSV reader over {@code csv} in the given coordinate reference system and
 * registers a converter so UTC date columns can be parsed into ProductData.UTC.
 *
 * NOTE(review): the RandomAccessFile is deliberately not closed here - its channel is
 * handed to the FileChannelImageInputStream, which presumably owns it from then on;
 * confirm the stream's close() releases the file.
 *
 * @param csv the CSV file to read
 * @param crs coordinate reference system for the records
 * @throws IOException if the file cannot be opened
 */
private CsvFile(File csv, CoordinateReferenceSystem crs) throws IOException {
  this.csv = csv;
  ConverterRegistry.getInstance().setConverter(ProductData.UTC.class, new UTCConverter());
  this.crs = crs;
  RandomAccessFile randomAccessFile = new RandomAccessFile(csv, "r");
  stream = new FileChannelImageInputStream(randomAccessFile.getChannel());
}
/**
 * Closes the file and automatically releases the {@link FileLock} (if any), then removes
 * the advisory lock for that file (if any).
 *
 * <p>Note: use together with {@link FileLockUtility#openFile(File, String, boolean)} so
 * the optional advisory lock file is deleted when the file is closed. The advisory lock
 * file provides advisory locking on file modes (read-only), platforms, or file systems
 * (NFS) that do not support {@link FileLock}.
 *
 * @param file The file.
 * @param raf The {@link RandomAccessFile}.
 * @throws IOException if closing the underlying file fails
 */
public static void closeFile(File file, RandomAccessFile raf) throws IOException {
  if (file == null) throw new IllegalArgumentException();
  if (raf == null) throw new IllegalArgumentException();
  try {
    // A thread interrupted during IO can cause the channel to be closed
    // asynchronously, so only close when the channel is still open; the
    // disk-based store modes handle that async-close case themselves.
    final boolean stillOpen = raf.getChannel().isOpen();
    if (stillOpen) {
      raf.close();
    }
  } finally {
    // Whether or not the file was open (see note above), always drop the
    // advisory lock if one is present.
    removeAdvisoryLock(file);
  }
}
/**
 * Performs locking. If returns {@code true}, locking was successful and caller holds the lock.
 * Multiple invocations, after lock is acquired, does not have any effect, locking happens only
 * once.
 */
public synchronized boolean lock() {
  if (fileLock != null) {
    return true; // already acquired by an earlier call
  }
  try {
    // "rws" forces content and metadata to disk on every write.
    randomAccessFile = new RandomAccessFile(lockFile, "rws");
    // Lock only the first byte; a shared=false request makes it exclusive.
    fileLock = randomAccessFile.getChannel().tryLock(0L, 1L, false);
    if (fileLock != null) {
      // We own the lock: truncate the file and write our identifying payload.
      randomAccessFile.setLength(0);
      randomAccessFile.seek(0);
      randomAccessFile.write(payload);
    }
  } catch (IOException | OverlappingFileLockException e) {
    // logging is not configured yet, so use console
    System.err.println("Failed to write lock file");
    e.printStackTrace();
    // handle it as null result
    fileLock = null;
  } finally {
    // Runs on both the exception path and the "tryLock returned null" path:
    // clean up the RandomAccessFile and report failure.
    if (fileLock == null) {
      release();
      return false;
    }
  }
  return true;
}
/**
 * Creates a store backed by a memory-mapped file, resizing the backing file to
 * {@code size} when needed (device files under /dev/ are never resized).
 *
 * @param file backing file (or device) to map
 * @param mode map mode; resizing is only permitted for READ_WRITE
 * @param size mapping size in bytes, in [0, 128 TiB]
 * @param objectSerializer serializer used for values stored here
 * @throws IOException if the file cannot be opened, resized or mapped
 */
public MappedStore(File file, FileChannel.MapMode mode, long size, ObjectSerializer objectSerializer) throws IOException {
  if (size < 0 || size > 128L << 40) {
    throw new IllegalArgumentException("invalid size: " + size);
  }
  this.file = file;
  this.size = size;
  this.objectSerializer = objectSerializer;
  try {
    RandomAccessFile raf = new RandomAccessFile(file, accesModeFor(mode));
    // Grow/shrink the backing file to the mapping size, except for device files,
    // which cannot be resized.
    if (raf.length() != this.size && !file.getAbsolutePath().startsWith("/dev/")) {
      if (mode != FileChannel.MapMode.READ_WRITE) {
        throw new IOException("Cannot resize file to " + size + " as mode is not READ_WRITE");
      }
      raf.setLength(this.size);
    }
    this.fileChannel = raf.getChannel();
    this.address = map0(fileChannel, imodeFor(mode), 0L, size);
    // Unmap the region and release the channel once this store becomes unreachable.
    this.cleaner = Cleaner.create(this, new Unmapper(address, size, fileChannel));
  } catch (Exception e) {
    throw wrap(e);
  }
}
/**
 * Tries to take the exclusive cross-process lock on the WebView data directory and
 * warns (but continues) when another process appears to hold it.
 */
private static void tryObtainingDataDirLockOrDie() {
  // Disk access is required here, so temporarily relax StrictMode for this thread.
  StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskReads();
  StrictMode.allowThreadDiskWrites();
  try {
    String dataPath = PathUtils.getDataDirectory(ContextUtils.getApplicationContext());
    File lockFile = new File(dataPath, EXCLUSIVE_LOCK_FILE);
    boolean success = false;
    try {
      // Note that the file is not closed intentionally: the OS lock must stay
      // held for the entire process lifetime.
      RandomAccessFile file = new RandomAccessFile(lockFile, "rw");
      sExclusiveFileLock = file.getChannel().tryLock();
      success = sExclusiveFileLock != null;
    } catch (IOException e) {
      Log.w(TAG, "Failed to create lock file " + lockFile, e);
    }
    if (!success) {
      Log.w(TAG, "The app may have another WebView opened in a separate process. " + "This is not recommended and may stop working in future versions.");
    }
  } finally {
    // Always restore the caller's StrictMode policy.
    StrictMode.setThreadPolicy(oldPolicy);
  }
}
@Test public void shouldKeepLockWhileMovingLockedFile() throws IOException { File tmpDir = new File("target"); System.out.println("Temporary directory for tests: " + tmpDir.getAbsolutePath()); assertThat(tmpDir.exists(), is(true)); assertThat(tmpDir.canRead(), is(true)); assertThat(tmpDir.canWrite(), is(true)); assertThat(tmpDir.isDirectory(), is(true)); File file1 = new File(tmpDir, "lockFile"); // file1.createNewFile(); // Lock the file ... RandomAccessFile raf = new RandomAccessFile(file1, "rw"); FileLock fileLock = raf.getChannel().lock(); // Now try moving our locked file ... File file2 = new File(tmpDir, "afterMove"); if (!file1.renameTo(file2)) { LOGGER.warn("RenameTo not successful. Will be ignored if on Windows"); if (System.getProperty("os.name").toLowerCase().contains("windows")) { fileLock.release(); return; } } fileLock.release(); assertThat(file1.exists(), is(false)); assertThat(file2.exists(), is(true)); }
/**
 * Create a temporary file used to lock ({@link FileLock}) an associated incomplete file {@link
 * File}. The {@link FileLock}'s name is derived from the original file, appending ".lock" at the
 * end. Usually this method gets executed when a download fail to complete because the JVM goes
 * down. In that case we resume the incomplete download and to prevent multiple process to work on
 * the same file, we use a dedicated {@link FileLock}.
 *
 * @param tmpFile a file on which we want to create a temporary lock file.
 * @return a {@link FileLockCompanion} contains the {@link File} and a {@link FileLock} if it was
 *     possible to lock the file.
 */
private FileLockCompanion lockFile(File tmpFile) {
  try {
    // On Unix tmpLock.getChannel().tryLock may not fail inside the same process, so we
    // must keep track of files this process is already downloading ourselves.
    // NOTE(review): the map is checked with the File key (tmpFile) but populated below
    // with the RandomAccessFile key (tmpLock) - those keys can never match, so this
    // guard looks ineffective; confirm the intended key type.
    if (activeDownloadFiles.containsKey(tmpFile)) {
      return new FileLockCompanion(tmpFile, null);
    }
    RandomAccessFile tmpLock = new RandomAccessFile(tmpFile.getPath() + ".lock", "rw");
    // Lock just the first byte of the ".lock" companion file.
    FileLock lock = tmpLock.getChannel().tryLock(0, 1, false);
    if (lock != null) {
      activeDownloadFiles.put(tmpLock, Boolean.TRUE);
    } else if (lock == null) {
      // Another process holds the lock: release our handle and report no lock.
      try {
        tmpLock.close();
      } catch (IOException ex) {
      }
    }
    return new FileLockCompanion(tmpFile, lock, tmpFile.getPath() + ".lock");
  } catch (OverlappingFileLockException ex) {
    // This JVM already holds an overlapping lock on the region.
    return new FileLockCompanion(tmpFile, null);
  } catch (IOException ex) {
    return new FileLockCompanion(tmpFile, null);
  }
}
public boolean hasNext() { if (raf == null || !raf.getChannel().isOpen()) return false; if (flagNext == true) return true; // Déjà lue flagNext = true; try { next = new Entry(); next.timestamp = (long) raf.readInt(); next.value = raf.readFloat(); if (this.end != null && next.timestamp > this.end) { next = null; close(); return false; } return true; } catch (IOException e) { // EOF ou autre erreur d'IO if (!(e instanceof EOFException)) logger.log(Level.WARNING, e.getMessage(), e); next = null; try { close(); } catch (IOException e1) { logger.log(Level.WARNING, e.getMessage(), e); } return false; } }
/** * Mapped File way MappedByteBuffer 可以在处理大文件时,提升性能 * * @param filename * @return * @throws IOException */ public static byte[] toByteArray3(String filePath) throws IOException { FileChannel fc = null; RandomAccessFile rf = null; try { rf = new RandomAccessFile(filePath, "r"); fc = rf.getChannel(); MappedByteBuffer byteBuffer = fc.map(MapMode.READ_ONLY, 0, fc.size()).load(); // System.out.println(byteBuffer.isLoaded()); byte[] result = new byte[(int) fc.size()]; if (byteBuffer.remaining() > 0) { // System.out.println("remain"); byteBuffer.get(result, 0, byteBuffer.remaining()); } return result; } catch (IOException e) { e.printStackTrace(); throw e; } finally { try { rf.close(); fc.close(); } catch (IOException e) { e.printStackTrace(); } } }
/**
 * Sets the version for the given neostore file in {@code storeDir}.
 *
 * @param storeDir the store dir to locate the neostore file in.
 * @param version the version to set.
 * @return the previous version before writing.
 */
public static long setVersion(String storeDir, long version) {
  RandomAccessFile file = null;
  try {
    file = new RandomAccessFile(new File(storeDir, NeoStore.DEFAULT_NAME), "rw");
    FileChannel channel = file.getChannel();
    // The version lives in the third record; skip its 1-byte inUse flag.
    channel.position(RECORD_SIZE * 2 + 1 /*inUse*/);
    ByteBuffer buffer = ByteBuffer.allocate(8);
    channel.read(buffer);
    buffer.flip();
    long previous = buffer.getLong();
    // Rewind to the same offset and overwrite with the new version.
    channel.position(RECORD_SIZE * 2 + 1 /*inUse*/);
    buffer.clear();
    buffer.putLong(version).flip();
    channel.write(buffer);
    return previous;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    try {
      if (file != null) file.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
/**
 * Writes the given piece into the temporary file on disk: the piece data is appended
 * into the next free fixed-size slot and the header's index table (entry {@code num})
 * is updated to point at that slot.
 *
 * @param piece the piece to write (padded with zeros if shorter than the piece size)
 * @param num the piece number; must be in [0, nbPieces())
 */
private synchronized void writePieceTmpFile(byte[] piece, int num) {
  if (num < 0 || num >= this.nbPieces()) {
    throw new IllegalArgumentException();
  }
  if (piece.length > _piecesize) {
    throw new IllegalArgumentException();
  }
  try {
    RandomAccessFile writer_tmp = new RandomAccessFile(this, "rw");
    FileChannel writer = writer_tmp.getChannel();
    // Next free slot: pieces are stored back-to-back right after the header.
    int index_piece = ((int) this.length() - this.headerSize()) / _piecesize;
    // Pad a short (final) piece with zeros up to the fixed piece size.
    if (piece.length < _piecesize) {
      piece = Arrays.copyOf(piece, _piecesize);
    }
    // Record which physical slot holds piece `num`, then write the piece data itself.
    Tools.write(writer, 4 + _key.length() + 4 + 4 + 4 * num, index_piece);
    Tools.write(writer, this.headerSize() + _piecesize * index_piece, piece);
    writer.force(true); // flush data and metadata to disk before returning
    writer_tmp.close();
  } catch (Exception e) {
    // NOTE(review): all failures are swallowed after printing - callers cannot tell
    // that the piece was not persisted; confirm this best-effort behavior is intended.
    System.out.println("Unable to write tmp file piece");
    e.printStackTrace();
  }
}
/**
 * Reads the 8-byte value of record {@code recordPosition} from the store file at
 * {@code storeDir}, skipping the record's 1-byte inUse flag.
 *
 * NOTE(review): the file is opened "rw" although it is only read here - that creates
 * the file if missing; confirm read-only mode would not be more appropriate.
 *
 * @param storeDir path of the store file
 * @param recordPosition index of the record to read
 * @return the record value, or -1 when the store predates the version field
 */
private static long getRecord(String storeDir, long recordPosition) {
  RandomAccessFile file = null;
  try {
    file = new RandomAccessFile(new File(storeDir), "rw");
    FileChannel channel = file.getChannel();
    /*
     * We have to check size, because the store version
     * field was introduced with 1.5, so if there is a non-clean
     * shutdown we may have a buffer underflow.
     */
    if (recordPosition > 3 && channel.size() < RECORD_SIZE * 5) {
      return -1;
    }
    channel.position(RECORD_SIZE * recordPosition + 1 /*inUse*/);
    ByteBuffer buffer = ByteBuffer.allocate(8);
    channel.read(buffer);
    buffer.flip();
    long previous = buffer.getLong();
    return previous;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    try {
      if (file != null) file.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
/**
 * Verifies IdentityDecoder.transfer() when part of the input is already buffered:
 * the first fill() consumes 7 bytes ("stuff; ") into the session buffer, so the
 * metrics must count only the bytes read from the channel afterwards, while the file
 * still receives the complete decoded content.
 */
@Test
public void testDecodingFileWithBufferedSessionData() throws Exception {
  final ReadableByteChannel channel = new ReadableByteChannelMock(new String[] {"stuff; ", "more stuff; ", "a lot more stuff!"}, Consts.ASCII);
  final SessionInputBuffer inbuf = new SessionInputBufferImpl(1024, 256, Consts.ASCII);
  final HttpTransportMetricsImpl metrics = new HttpTransportMetricsImpl();
  final IdentityDecoder decoder = new IdentityDecoder(channel, inbuf, metrics);
  // Pre-buffer the first chunk ("stuff; " = 7 bytes) in the session input buffer.
  final int i = inbuf.fill(channel);
  Assert.assertEquals(7, i);
  createTempFile();
  final RandomAccessFile testfile = new RandomAccessFile(this.tmpfile, "rw");
  try {
    final FileChannel fchannel = testfile.getChannel();
    long pos = 0;
    while (!decoder.isCompleted()) {
      // Transfer in at-most-10-byte slices until the decoder reports completion.
      final long bytesRead = decoder.transfer(fchannel, pos, 10);
      if (bytesRead > 0) {
        pos += bytesRead;
      }
    }
    // count everything except the initial 7 bytes that went to the session buffer
    Assert.assertEquals(testfile.length() - 7, metrics.getBytesTransferred());
  } finally {
    testfile.close();
  }
  Assert.assertEquals("stuff; more stuff; a lot more stuff!", CodecTestUtils.readFromFile(this.tmpfile));
}
/* (non-Javadoc)
 * @see com.ongraphdb.store.DiskSore1#start()
 *
 * Opens (or creates) the data file, takes an exclusive inter-process lock, maps the
 * data region, initializes/restores the next-write position, and registers a shutdown
 * hook that flushes, unlocks and unmaps the store.
 */
public void start() throws IOException {
  File file = new File(dataFileName);
  // A new store iff the backing file does not exist yet.
  boolean newStore = file.exists() ? false : true;
  // NOTE(review): the RandomAccessFile itself is never closed - the channel keeps the
  // descriptor alive for the store's lifetime; confirm that is intentional.
  RandomAccessFile dataFile = new RandomAccessFile(dataFileName, "rw");
  // NOTE(review): setLength is applied to existing stores too, extending or truncating
  // them to initialFileSize - verify that truncation of a grown store cannot happen.
  dataFile.setLength(initialFileSize);
  dataChannel = dataFile.getChannel();
  dataLock = dataChannel.lock(); // exclusive lock against other processes
  mappedDataBuffer = dataChannel.map(MapMode.READ_WRITE, 0, dataMappedMemorySize);
  if (newStore) {
    // Fresh store: persist the initial next-free-position marker.
    nextPosition = NEXT_BYTES;
    mappedDataBuffer.putInt(nextPosition);
  } else {
    // Existing store: restore the next-free-position marker from the first int.
    nextPosition = mappedDataBuffer.getInt();
  }
  // Flush, unlock and unmap cleanly when the JVM shuts down.
  shutdownHookThread = new Thread() {
    public void run() {
      try {
        mappedDataBuffer.force();
        dataLock.release();
        dataChannel.close();
        unmap(mappedDataBuffer);
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  };
  Runtime.getRuntime().addShutdownHook(shutdownHookThread);
}
/**
 * Reads a rows x cols matrix of fixed-bit-width values from {@code dataFile}, either by
 * memory-mapping the file or by copying it into a direct buffer.
 *
 * @param dataFile file holding the packed values
 * @param rows number of rows
 * @param cols number of columns
 * @param columnSizesInBits per-column width in bits
 * @param signed true if the data consists of negative numbers
 * @param isMmap true to memory-map (file stays open), false to read into a direct
 *        buffer on the heap side and close the file
 * @throws IOException on open/map/read failure
 */
private FixedBitWidthRowColDataFileReader(File dataFile, int rows, int cols, int[] columnSizesInBits, boolean[] signed, boolean isMmap) throws IOException {
  init(rows, cols, columnSizesInBits, signed);
  file = new RandomAccessFile(dataFile, "rw");
  this.isMmap = isMmap;
  if (isMmap) {
    // Mapped mode: the file handle must remain open for the mapping's lifetime.
    byteBuffer = file.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, totalSizeInBytes).load();
  } else {
    // Direct-buffer mode: copy everything up front and release the file handle.
    // NOTE(review): a single read() is not guaranteed to fill the buffer - confirm
    // totalSizeInBytes always arrives in one read for the files used here.
    byteBuffer = ByteBuffer.allocateDirect(totalSizeInBytes);
    file.getChannel().read(byteBuffer);
    file.close();
  }
  ownsByteBuffer = true;
  customBitSet = CustomBitSet.withByteBuffer(totalSizeInBytes, byteBuffer);
}