/**
 * Inserts a string into a file at the given byte offset, shifting the existing tail of the
 * file forward to make room.
 *
 * @param skip number of bytes from the start of the file at which to insert the data
 * @param str the string to insert (encoded with the platform default charset)
 * @param fileName path of the file to modify
 * @throws IOException if the file cannot be opened or updated
 */
public static void writeSkip(long skip, String str, String fileName) throws IOException {
  // try-with-resources guarantees the file is closed on every path; the original could
  // NPE in its finally block when the RandomAccessFile constructor itself threw.
  try (RandomAccessFile raf = new RandomAccessFile(fileName, "rw")) {
    if (skip < 0 || skip > raf.length()) {
      System.out.println("跳过字节数无效");
      return;
    }
    byte[] b = str.getBytes();
    long oldLength = raf.length();
    raf.setLength(oldLength + b.length);
    // Shift the tail [skip, oldLength) forward by b.length bytes, copying from the end
    // backwards so no byte is overwritten before it has been moved.
    for (long i = oldLength + b.length - 1; i > skip + b.length - 1; i--) {
      raf.seek(i - b.length);
      byte temp = raf.readByte();
      raf.seek(i);
      raf.writeByte(temp);
    }
    // Finally write the new data into the gap.
    raf.seek(skip);
    raf.write(b);
  }
}
/**
 * Reassembles a single Turtle (RDF) file from a folder of "part*" chunk files.
 *
 * <p>The first part is copied verbatim; for each subsequent part only the data after its
 * Turtle prefix section (located via {@code findTurtlePrefixEnd}) is appended, so prefix
 * declarations are not duplicated. The result is built in a temp file and atomically moved
 * onto {@code reconstructed}; the part folder is deleted afterwards.
 *
 * @param partFolder directory containing the "part"-prefixed chunk files
 * @param reconstructed destination for the reassembled Turtle document
 * @throws IOException if {@code partFolder} is not a directory or an I/O error occurs
 */
public static void reconstructTurtle(File partFolder, File reconstructed) throws IOException {
  Path tmpOut = Files.createTempFile(partFolder.toPath(), "reconstr", ".tmp");
  FileOutputStream dstOut = new FileOutputStream(tmpOut.toFile());
  FileChannel dstOutChannel = dstOut.getChannel();
  try {
    if (!Files.isDirectory(partFolder.toPath()))
      throw new IOException("Not a directory: " + partFolder);

    // Collect every "part*" file recursively and sort by name so chunks are
    // concatenated in order.
    File[] fileList =
        FileUtils.listFiles(partFolder, new PrefixFileFilter("part"), TrueFileFilter.TRUE)
            .toArray(new File[0]);
    Arrays.sort(fileList);

    // The first chunk keeps its prefix section; copy it whole.
    RandomAccessFile inputFile;
    inputFile = new RandomAccessFile(fileList[0], "r");
    inputFile.getChannel().transferTo(0, inputFile.length(), dstOutChannel);
    inputFile.close();
    for (int i = 1; i < fileList.length; i++) {
      inputFile = new RandomAccessFile(fileList[i], "r");
      // Skip this chunk's prefix declarations; the first chunk already wrote them.
      long lastPrefix = findTurtlePrefixEnd(inputFile);
      inputFile
          .getChannel()
          .transferTo(lastPrefix, inputFile.length() - lastPrefix, dstOutChannel);
      inputFile.close();
    }
  } finally {
    dstOut.close();
  }
  Files.move(
      tmpOut,
      reconstructed.toPath(),
      StandardCopyOption.ATOMIC_MOVE,
      StandardCopyOption.REPLACE_EXISTING);
  // NOTE(review): tmpOut was already moved above, so this delete is a no-op; if any step
  // before the move throws, the temp file is left behind in partFolder instead.
  FileUtils.deleteQuietly(tmpOut.toFile());
  FileUtils.deleteQuietly(partFolder);
}
/** * Returns the uncompressed size of the file in a quick, but unreliable manner. It will not * report the correct size if: * * <ol> * <li>The compressed size is larger than 2<sup>32</sup> bytes. * <li>The file is broken or truncated. * <li>The file has not been generated by a standard-conformant compressor. * <li>It is a multi-volume GZIP stream. * </ol> * * <p>The advantage of this approach is, that it only reads the first 2 and last 4 bytes of the * target file. If the first 2 bytes are not the GZIP magic number, the raw length of the file * is returned. * * @see #isGzipStream(File) * @param file * @return the size of the uncompressed file content. */ public static long getGzipStreamSize(File file) { if (!isGzipStream(file)) { return file.length(); } RandomAccessFile raf = null; try { raf = new RandomAccessFile(file, "r"); if (raf.length() <= 4) { raf.close(); return file.length(); } raf.seek(raf.length() - 4); int b4 = raf.read(); int b3 = raf.read(); int b2 = raf.read(); int b1 = raf.read(); return (b1 << 24) + (b2 << 16) + (b3 << 8) + b4; } catch (IOException ex) { return file.length(); } finally { if (raf != null) try { raf.close(); } catch (IOException e) { // ignore } } }
/** * Reads data from the given file into the given buffer, centered around the given file offset. * The first half of the buffer will be filled with data right before the given offset, while the * remainder of the buffer will contain data right after it (of course, containing the byte at the * given offset). * * @param stream The stream to read from * @param buffer The buffer to read data into * @param fileReferenceOffset The offset to start reading from in the stream. * @return The number of bytes reads, which could be less than the length of the input buffer if * we can't read due to the beginning or the end of the file. * @throws IOException Thrown if the stream being used is invalid or inaccessible. */ private static int readIntoBufferAroundReference( RandomAccessFile stream, byte[] buffer, long fileReferenceOffset) throws IOException { int length = buffer.length; // calculate start offset long fileStartOffset = fileReferenceOffset - length / 2; if (fileStartOffset < 0) { // offset is less than zero, adjust it, as well as the length we want to read length += (int) fileStartOffset; fileStartOffset = 0; if (length <= 0) { return 0; } } if (fileStartOffset + length > stream.length()) { // startOffset + length is beyond the end of the stream, adjust the length accordingly length = (int) (stream.length() - fileStartOffset); if (length <= 0) { return 0; } } // read the appropriate block of the file into the buffer, using symmetry with respect to its // midpoint // we always initiate a seek from the origin of the file. stream.seek(0); stream.seek(fileStartOffset); int bufferOffset = 0; while (bufferOffset < length) { int bytesRead = stream.read(buffer, bufferOffset, length - bufferOffset); bufferOffset += bytesRead; } return length; }
/**
 * Copies all bytes from {@code input} to the end of {@code out} (resume-style append),
 * stopping at end-of-stream, task cancellation, loss of connectivity, or a 30-second
 * stalled-transfer timeout.
 *
 * <p>NOTE(review): this method closes the caller-supplied {@code out} in its finally block —
 * confirm callers expect ownership to transfer.
 *
 * @param input source stream to copy from
 * @param out destination file; writing starts at its current end
 * @param task download task used for cancellation and stop signaling
 * @return the number of bytes copied
 * @throws IOException if reading or writing fails
 */
public int copy(InputStream input, RandomAccessFile out, TTask.Task task)
    throws Exception, IOException {
  this.mBuffer = new byte[8192];
  BufferedInputStream in = new BufferedInputStream(input, 8192);
  TLog.v(this.TAG, "length" + out.length());
  // Resume: append to the end of the existing file.
  out.seek(out.length());
  int count = 0;
  int byteCount = 0;
  long errorBlockTimePreviousTime = -1L;
  long expireTime = 0L;
  try {
    while (!task.isCancel()) {
      byteCount = in.read(this.mBuffer, 0, 8192);
      if (byteCount == -1) {
        break; // end of stream
      }
      out.write(this.mBuffer, 0, byteCount);
      count += byteCount;
      // Abort when the device goes offline.
      if (!TMDownloadManager.getInstance().isOnline()) {
        task.stopTask();
        setErrorCode(2);
        break;
      }
      if (this.mSpeed == 0L) {
        // Transfer appears stalled: track for how long, and give up after 30 s.
        if (errorBlockTimePreviousTime > 0L) {
          expireTime = System.currentTimeMillis() - errorBlockTimePreviousTime;
          if (expireTime > 30000L) {
            setErrorCode(2);
            task.stopTask();
          }
        } else {
          errorBlockTimePreviousTime = System.currentTimeMillis();
        }
      } else {
        // Progressing again; reset the stall timer.
        expireTime = 0L;
        errorBlockTimePreviousTime = -1L;
      }
    }
  } finally {
    // Best-effort close of both ends; failures are recorded via error code 3.
    try {
      out.close();
    } catch (IOException e) {
      setErrorCode(3);
      TLog.e(this.TAG, e.getMessage());
    }
    try {
      in.close();
    } catch (IOException e) {
      setErrorCode(3);
      TLog.e(this.TAG, e.getMessage());
    }
  }
  this.mBuffer = null;
  return count;
}
public int copy(InputStream input, RandomAccessFile out) throws IOException, NetworkErrorException { if (input == null || out == null) { return -1; } byte[] buffer = new byte[BUFFER_SIZE]; BufferedInputStream in = new BufferedInputStream(input, BUFFER_SIZE); if (DEBUG) { Log.v(TAG, "length" + out.length()); } int count = 0, n = 0; long errorBlockTimePreviousTime = -1, expireTime = 0; try { out.seek(out.length()); while (!interrupt) { n = in.read(buffer, 0, BUFFER_SIZE); if (n == -1) { break; } out.write(buffer, 0, n); count += n; /* * check network */ if (!NetworkUtils.isNetworkAvailable(context)) { throw new NetworkErrorException("Network blocked."); } if (networkSpeed == 0) { if (errorBlockTimePreviousTime > 0) { expireTime = System.currentTimeMillis() - errorBlockTimePreviousTime; if (expireTime > TIME_OUT) { throw new ConnectTimeoutException("connection time out."); } } else { errorBlockTimePreviousTime = System.currentTimeMillis(); } } else { expireTime = 0; errorBlockTimePreviousTime = -1; } } } finally { client.close(); // must close client first client = null; out.close(); in.close(); input.close(); } return count; }
/**
 * Attempts to repair a region file whose chunk-offset table contains a negative offset.
 * A ".bak" copy of the file is made first; the file is then padded to whole 4096-byte
 * sectors, and any offset-table entry whose sector range would start at a negative position
 * is zeroed so the chunk is treated as missing.
 *
 * @param regionFileFile the region file to repair (modified in place)
 * @return a freshly opened {@code RegionFile} over the repaired file
 */
private static RegionFile fixNegativeOffset(File regionFileFile) {
  FMLLog.log(
      Level.WARNING,
      "Region file " + regionFileFile + " is corrupted: negative offset. Attempting to fix.");
  // Back up the corrupt file before touching it; a failed backup is logged but not fatal.
  try {
    Files.copy(
        regionFileFile,
        new File(regionFileFile.getParentFile(), regionFileFile.getName() + ".bak"));
  } catch (IOException e) {
    FMLLog.log(Level.SEVERE, e, "Failed to back up corrupt region file.");
  }
  try {
    RandomAccessFile dataFile = new RandomAccessFile(regionFileFile, "rw");
    try {
      int length;
      // A region file starts with a 4096-byte offset table followed by a 4096-byte
      // timestamp table; write both as zeros when the file is too short to hold them.
      if (dataFile.length() < 4096L) {
        for (length = 0; length < 1024; ++length) {
          dataFile.writeInt(0);
        }
        for (length = 0; length < 1024; ++length) {
          dataFile.writeInt(0);
        }
      }
      // Pad the file to a multiple of 4096 bytes.
      // NOTE(review): dataFile.length() is re-evaluated every iteration while the writes
      // grow the file, so the byte count written is not obviously (4096 - remainder) —
      // confirm this loop pads as intended.
      if ((dataFile.length() & 4095L) != 0L) {
        for (length = 0; (long) length < (dataFile.length() & 4095L); ++length) {
          dataFile.write(0);
        }
      }
      // Total number of 4096-byte sectors now in the file.
      length = (int) dataFile.length() / 4096;
      dataFile.seek(0L);
      // Scan the 1024 offset-table entries; each packs (sectorStart << 8) | sectorCount.
      for (int i = 0; i < 1024; ++i) {
        int offset = dataFile.readInt();
        if (offset != 0 && (offset >> 8) + (offset & 255) <= length) {
          for (int var5 = 0; var5 < (offset & 255); ++var5) {
            if ((offset >> 8) + var5 < 0) {
              // Negative sector start: rewind over the entry just read and zero it.
              dataFile.seek(dataFile.getFilePointer() - 4);
              dataFile.writeInt(0);
              break;
            }
          }
        }
      }
    } finally {
      dataFile.close();
    }
  } catch (Throwable t) {
    FMLLog.log(Level.SEVERE, t, "Failed to fix negative offset index in " + regionFileFile);
    throw UnsafeUtil.throwIgnoreChecked(t);
  }
  return new RegionFile(regionFileFile);
}
/**
 * Creates and initializes an SPV block store. Will create the given file if it's missing. This
 * operation will block on disk.
 *
 * @param params network parameters for the chain this store belongs to
 * @param file backing file for the store; created and sized if it does not yet exist
 * @throws BlockStoreException if the file has an unexpected size or magic header, is locked
 *     by another process, or cannot be opened and memory-mapped
 */
public SPVBlockStore(NetworkParameters params, File file) throws BlockStoreException {
  checkNotNull(file);
  this.params = checkNotNull(params);
  try {
    this.numHeaders = DEFAULT_NUM_HEADERS;
    boolean exists = file.exists();
    // Set up the backing file.
    randomAccessFile = new RandomAccessFile(file, "rw");
    long fileSize = getFileSize();
    if (!exists) {
      log.info("Creating new SPV block chain file " + file);
      randomAccessFile.setLength(fileSize);
    } else if (randomAccessFile.length() != fileSize) {
      throw new BlockStoreException(
          "File size on disk does not match expected size: "
              + randomAccessFile.length()
              + " vs "
              + fileSize);
    }
    FileChannel channel = randomAccessFile.getChannel();
    // Take an advisory lock so two processes cannot use the same store file concurrently.
    fileLock = channel.tryLock();
    if (fileLock == null)
      throw new BlockStoreException("Store file is already locked by another process");
    // Map it into memory read/write. The kernel will take care of flushing writes to disk at
    // the most efficient times, which may mean that until the map is deallocated the data on
    // disk is randomly inconsistent. However the only process accessing it is us, via this
    // mapping, so our own view will always be correct. Once we establish the mmap the
    // underlying file and channel can go away. Note that the details of mmapping vary
    // between platforms.
    buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileSize);
    // Check or initialize the header bytes to ensure we don't try to open some random file.
    byte[] header;
    if (exists) {
      header = new byte[4];
      buffer.get(header);
      if (!new String(header, "US-ASCII").equals(HEADER_MAGIC))
        throw new BlockStoreException("Header bytes do not equal " + HEADER_MAGIC);
    } else {
      initNewStore(params);
    }
  } catch (Exception e) {
    // Release the file handle before propagating so a failed open does not leak it.
    try {
      if (randomAccessFile != null) randomAccessFile.close();
    } catch (IOException e2) {
      throw new BlockStoreException(e2);
    }
    throw new BlockStoreException(e);
  }
}
/**
 * Reads a contiguous byte range spanning a group of existing files and returns it as a single
 * GZIP-compressed block, together with descriptors of the files involved. The domain is taken
 * from the {@code _domain} field (1st level directory).
 *
 * @param paths The paths relative to the domain for the files to be read
 * @param offset The offset in the logical concatenation of the files to start reading from
 * @param len The length of the block in bytes in zipped form
 * @return The contents of the files in zipped form with the file descriptors, or null when a
 *     path is missing or not a regular file
 */
public MultiFileBlock readFromStreamZipped(String[] paths, long offset, int len) {
  MultiFileBlock block = null;
  try {
    if (_isLocal) {
      // Translate the logical offset into (file index, offset within that file).
      int startFileIndex =
          Util.getStartFileIndex(
              _rootFile.getCanonicalPath() + File.separatorChar, paths, offset);
      long startFileOffset =
          Util.getStartFileOffset(
              _rootFile.getCanonicalPath() + File.separatorChar, paths, offset);
      int i = startFileIndex;
      long j = startFileOffset;
      // int bufSize=0;
      int totalBufSize = 0; // raw (pre-compression) bytes consumed so far
      int readResult = 0;
      ByteArrayOutputStream bOut = new ByteArrayOutputStream(len);
      GZIPOutputStream zOut = new GZIPOutputStream(bOut, len);
      block = new MultiFileBlock();
      // Walk files from the start position, compressing until len bytes are consumed or we
      // run out of files.
      // NOTE(review): totalBufSize counts raw input bytes, while the len parameter is
      // documented as the zipped length — confirm which is intended.
      while (totalBufSize < len && i < paths.length) {
        File tmpFile = new File(_rootFile.getCanonicalPath() + File.separatorChar + paths[i]);
        if (tmpFile.isFile() && tmpFile.exists()) {
          RandomAccessFile in = new RandomAccessFile(tmpFile, "r");
          byte[] tmpBuf = new byte[len - totalBufSize];
          in.seek(j);
          while (totalBufSize < len && in.getFilePointer() < in.length()) {
            readResult = in.read(tmpBuf);
            if (readResult != -1) {
              zOut.write(tmpBuf, 0, readResult);
              // bufSize = bOut.size();
              totalBufSize += readResult;
            } else {
              break;
            }
          }
          // Record which file contributed to this block (and its total length).
          BlockFileDescriptor des = new BlockFileDescriptor(_domain, paths[i], in.length());
          block.addBlockFileDescriptor(des);
          in.close();
          i++;
          j = 0; // subsequent files are read from their beginning
        } else {
          // Missing file: abort. NOTE(review): zOut/bOut are abandoned unclosed here.
          return null;
        }
      }
      zOut.close();
      block.setBlockData(bOut.toByteArray());
    } else {
      // Delegate to the remote implementation.
      block = _remote.readFromStreamZipped(_domain, paths, offset, len);
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
  return block;
}
protected static void persist(String content, String filePath, boolean append) throws IOException { // Remove string like <?xml version="1.0" encoding="UTF-8"?> // Pattern p = Pattern.compile("(<Comment\\W.*?</Comment>)"); // Matcher m = p.matcher(content); File f = new File(filePath); File parent = f.getParentFile(); if (!parent.exists()) parent.mkdirs(); if (!f.exists()) f.createNewFile(); RandomAccessFile raf = null; try { raf = new RandomAccessFile(f, "rw"); if (!append) raf.setLength(0); byte[] buffer = new byte[1024]; raf.read(buffer); String leadingString = new String(buffer).trim(); Pattern p = Pattern.compile("(<\\?xml\\W.*?\\?>)"); Matcher m = p.matcher(leadingString); String xmlMeta = ""; // Store the xml version meta data, used to insert to the xml file // if needed. if (m.find()) { xmlMeta = m.group(); } leadingString = m.replaceAll(""); leadingString = leadingString .substring(0, Math.min(leadingString.length(), "<CommentsDocument />".length())) .replaceAll("\\s", ""); StringBuilder sb = new StringBuilder(); content = content.replaceAll(p.pattern(), ""); if (leadingString.startsWith("<CommentsDocument/>") || "".equals(leadingString)) { // There is no comments yet. raf.setLength(0); sb.append(xmlMeta); sb.append("<CommentsDocument>"); sb.append(content); sb.append("</CommentsDocument>"); raf.write(sb.toString().getBytes()); } else if (leadingString.startsWith("<CommentsDocument>")) { // There are some comments and append new comments. long fileLen = raf.length() - "</CommentsDocument>".getBytes().length; raf.setLength(fileLen); raf.seek(raf.length()); sb.append(content); sb.append("</CommentsDocument>"); raf.write(sb.toString().getBytes()); } } finally { if (raf != null) raf.close(); } }
/**
 * Allocates space at the end of the node file for this node and stores it there.
 * (Translated from the original Spanish comment: "creates the space in the file for the new
 * node and stores it; returns the position where it was stored".)
 *
 * @return the file position where the node was stored, or -1 on I/O error
 */
public long almacenarComoNuevoNodo() {
  try {
    // Append the node's serialized buffer at the current end of the file.
    long posicion = _archivo.length();
    _archivo.seek(_archivo.length());
    _archivo.write(_buffer);
    // Remember where this node now lives.
    _miPosicion = posicion;
    return posicion;
  } catch (IOException ex) {
    System.out.println("Error al almacenar un nodo!");
    return -1;
  }
}
/**
 * Reports whether the file's final byte is a line terminator.
 *
 * @param randomAccessFile the file to inspect; its position is moved to the last byte
 * @return true when the last byte is '\n' or '\r', false for an empty or unreadable file
 * @throws IOException if seeking or reading fails
 */
private boolean endsWithNewline(RandomAccessFile randomAccessFile) throws IOException {
  long size = randomAccessFile.length();
  if (size < 1) {
    // Empty file: nothing to inspect.
    return false;
  }
  randomAccessFile.seek(size - 1);
  byte[] lastByte = new byte[1];
  if (randomAccessFile.read(lastByte) < 1) {
    return false;
  }
  String tail = new String(lastByte);
  return "\n".equals(tail) || "\r".equals(tail);
}
/**
 * Re-reads the table file from disk and repopulates {@code fileMap}.
 *
 * <p>File layout: a key section of (null-terminated key, 4-byte big-endian value offset)
 * pairs, followed by the serialized values. The first value offset also marks where the key
 * section ends.
 *
 * @throws DatabaseCorruptedException if the file is empty, holds an invalid key, or a value
 *     is missing
 * @throws IOException if the file cannot be read
 */
private void update() throws DatabaseCorruptedException, IOException {
  try (RandomAccessFile file = new RandomAccessFile(filePath.toString(), "r")) {
    if (file.length() == 0) {
      throw new DatabaseCorruptedException("Data base corrupted: empty file found");
    }
    List<String> keys = new LinkedList<>();
    List<Integer> offsets = new LinkedList<>();
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    byte b;
    int counter = 0; // bytes consumed so far; tells us when the key section ends
    do { // Read keys.
      while ((b = file.readByte()) != 0) {
        counter++;
        bytes.write(b);
      }
      ++counter; // account for the null terminator
      offsets.add(file.readInt());
      counter += 4; // account for the 4-byte offset
      String key = bytes.toString(TableManager.CODE_FORMAT);
      bytes.reset();
      if (!checkKey(key)) {
        throw new DatabaseCorruptedException("Wrong key found in file " + filePath.toString());
      }
      keys.add(key);
    } while (counter < offsets.get(0)); // key section ends where the first value begins
    offsets.add((int) file.length()); // sentinel: the end of the last value
    offsets.remove(0); // It's current position in file, we don't need it in list.
    Iterator<String> keyIterator = keys.iterator();
    for (int nextOffset : offsets) { // Read values.
      while (counter < nextOffset) {
        bytes.write(file.readByte());
        counter++;
      }
      if (bytes.size() > 0) {
        try {
          fileMap.put(
              keyIterator.next(),
              provider.deserialize(table, bytes.toString(TableManager.CODE_FORMAT)));
        } catch (ParseException e) {
          throw new RuntimeException(
              "Data corrupted in file " + filePath.toString() + " : " + e.getMessage());
        }
        bytes.reset();
      } else {
        throw new DatabaseCorruptedException("Data corrupted in file " + filePath.toString());
      }
    }
    bytes.close();
  }
}
/**
 * reads the content of an existing file using the current domain
 *
 * @param path The path relative to the domain for the file
 * @param offset the offset from the beginning of the file.
 * @param len The length of the block in bytes
 * @return The contents of the file
 * @throws EOFException if nothing could be read at the given offset
 * @throws FileAccessException if the path is not a regular file or another error occurs
 */
public byte[] readByteFromFile(String path, long offset, int len)
    throws EOFException, FileAccessException {
  try {
    if (_isLocal) {
      File tmpFile = new File(_rootFile.getCanonicalPath() + File.separatorChar + path);
      if (!tmpFile.isFile()) {
        throw new FileAccessException("Path is not a file");
      }
      RandomAccessFile raf = new RandomAccessFile(tmpFile, "r");
      try {
        byte[] buffer = new byte[len];
        raf.seek(offset);
        int totalByteRead = 0;
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        int result = 0;
        while (totalByteRead < len && raf.getFilePointer() < raf.length()) {
          result = raf.read(buffer, 0, (len - totalByteRead));
          if (result != -1) {
            out.write(buffer, 0, result);
            totalByteRead += result;
          } else if (totalByteRead == 0) {
            throw new EOFException("End of file reached!");
          } else {
            break;
          }
        }
        out.flush();
        out.close();
        return out.toByteArray();
      } finally {
        // The original leaked the handle whenever EOFException was thrown mid-read;
        // always release it.
        raf.close();
      }
    } else {
      return _remote.readByteFromFile(_domain, path, offset, len);
    }
  } catch (EOFException eofe) {
    throw eofe;
  } catch (FileAccessException fae) {
    throw fae;
  } catch (Exception e) {
    throw new FileAccessException(e);
  }
}
/**
 * reads the content of an existing file using the current domain
 *
 * @param path The path relative to the domain for the file
 * @param block The sequential block number for the data to be read starting with 1
 * @param len The length of the block in bytes
 * @return The contents of the file; a zero-filled array of length {@code len} when the block
 *     lies past EOF (preserved original contract); null when the path is not a regular file
 *     or an error occurs
 */
public byte[] readFromFile(String path, int block, int len) {
  byte[] buffer = null;
  try {
    if (_isLocal) {
      File tmpFile = new File(_rootFile.getCanonicalPath() + File.separatorChar + path);
      if (tmpFile.isFile()) {
        RandomAccessFile in = new RandomAccessFile(tmpFile, "r");
        try {
          // Widen before multiplying: (block - 1) * len overflows int for large files
          // (the original bug), producing a bogus or negative seek target.
          in.seek((long) (block - 1) * len);
          buffer = new byte[len];
          if (in.getFilePointer() < in.length()) {
            int result = in.read(buffer);
            ByteArrayOutputStream out = new ByteArrayOutputStream(result);
            out.write(buffer, 0, result);
            return out.toByteArray();
          }
          // Past EOF: fall through and return the zero-filled buffer, as before.
        } finally {
          in.close(); // always release the handle, even on error
        }
      }
    } else {
      buffer = _remote.readFromFile(_domain, path, block, len);
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
  return buffer;
}
/**
 * Here we need to make sure that the ZipFileIndex is valid. Check the timestamp of the file
 * and if its the same as the one at the time the index was build we don't need to reopen
 * anything.
 */
private void checkIndex() throws IOException {
  boolean isUpToDate = true;
  if (!isUpToDate()) {
    // The zip changed on disk: drop the open handle so the index is rebuilt below.
    closeFile();
    isUpToDate = false;
  }
  if (zipRandomFile != null || isUpToDate) {
    // Index is still valid (or the file is already open); just refresh the access time.
    lastReferenceTimeStamp = System.currentTimeMillis();
    return;
  }
  hasPopulatedData = true;
  if (readIndex()) {
    // A previously persisted index was loaded; no need to scan the zip itself.
    lastReferenceTimeStamp = System.currentTimeMillis();
    return;
  }
  // No cached index available: reset state and rebuild by scanning the zip directory.
  directories = Collections.<RelativeDirectory, DirectoryEntry>emptyMap();
  allDirs = Collections.<RelativeDirectory>emptySet();
  try {
    openFile();
    long totalLength = zipRandomFile.length();
    ZipDirectory directory = new ZipDirectory(zipRandomFile, 0L, totalLength, this);
    directory.buildIndex();
  } finally {
    // The handle is only needed while building; release it either way.
    if (zipRandomFile != null) {
      closeFile();
    }
  }
  lastReferenceTimeStamp = System.currentTimeMillis();
}
/**
 * Reads and returns the full contents of the specified file.
 *
 * <p>Note: files larger than {@code Integer.MAX_VALUE} bytes cannot be read this way (the
 * length is cast to int to size the array).
 *
 * @param file File to read
 * @return byte[] containing full contents of file
 * @throws IOException if there is an I/O error while reading the file
 */
private byte[] readFile(File file) throws IOException {
  // try-with-resources: the original leaked the file handle when readFully threw.
  try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
    byte[] buffer = new byte[(int) raf.length()];
    raf.readFully(buffer);
    return buffer;
  }
}
public ByteBuffer readBacking() throws IOException { if (!_open) { return null; } ByteBuffer dst = ByteBuffer.allocate((int) _backing.length()); int length; // 4K buffer. byte[] buf = new byte[4 * (2 ^ 10)]; _backing.seek(0); // Read up to 4K blocks from the backing file while ((length = _backing.read(buf)) > 0) { try { // store the block of the correct size into the ByteBuffer. dst.put(buf, 0, length); } catch (BufferOverflowException e) { return null; } } return dst; }
/**
 * Returns the cached binary data for the given key.
 *
 * @param key cache key
 * @return the cached bytes with the expiry metadata stripped, or null when the entry is
 *     missing, expired, or unreadable
 */
public byte[] getAsBinary(String key) {
  RandomAccessFile RAFile = null;
  boolean removeFile = false;
  try {
    File file = mCache.get(key);
    if (!file.exists()) return null;
    RAFile = new RandomAccessFile(file, "r");
    byte[] byteArray = new byte[(int) RAFile.length()];
    // readFully guarantees the whole file is read; the original used read(), which may
    // legally return fewer bytes and leave the tail of the array zero-filled.
    RAFile.readFully(byteArray);
    if (!Utils.isDue(byteArray)) {
      return Utils.clearDateInfo(byteArray);
    } else {
      // Entry has expired: report a miss and delete the file on the way out.
      removeFile = true;
      return null;
    }
  } catch (Exception e) {
    e.printStackTrace();
    return null;
  } finally {
    if (RAFile != null) {
      try {
        RAFile.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    if (removeFile) remove(key);
  }
}
/**
 * Returns how far through the input file reading has progressed, as a fraction in [0, 1].
 * If the file cannot be queried, the read is assumed complete and 1.0 is returned.
 */
public double getProgress() {
  try {
    double position = 1.0 * filein.getFilePointer();
    double total = 1.0 * filein.length();
    return position / total;
  } catch (IOException e) {
    return 1.0;
  }
}
private void le() { // limpa a area de log para que tenha apenas uma leitura do arquivo na tela if (!areaLog.getText().isEmpty()) { areaLog.setText(defaults.getEMPTY_STRING()); } // verifica a existencia do arquivo de log.. if (!f.exists()) { Utilitarios.getInstance().showErrorMessage("O Arquivo : " + f.getPath() + " Nao Existe!"); } else { try { RandomAccessFile raf = new RandomAccessFile(f, "rw"); // verifica se a dados a serem mostrados if (raf.length() == 0) { Utilitarios.getInstance().showInfoMessage("Arquivo de Log Vazio!"); raf.close(); } else { // preseguimos com a leitura.. String dados; try (BufferedReader in = new BufferedReader(new FileReader(f.getPath()))) { while ((dados = in.readLine()) != null) { areaLog.append(dados + "\n"); } } catch (IOException erro) { ExceptionManager.ThrowException("Erro: ", erro); } } } catch (IOException ex) { Logger.getLogger(FrameLog.class.getName()).log(Level.SEVERE, null, ex); } } }
/**
 * Ensures that at least <code>pos</code> bytes are cached, or the end of the source is
 * reached. The return value is equal to the smaller of <code>pos</code> and the length of the
 * source file.
 */
private long readUntil(long pos) throws IOException {
  // We've already got enough data cached
  if (pos < length) {
    return pos;
  }
  // pos >= length but length isn't getting any bigger, so return it
  if (foundEOF) {
    return length;
  }
  long len = pos - length;
  // Append to the cache file starting at its current end.
  cache.seek(length);
  while (len > 0) {
    // Copy a buffer's worth of data from the source to the cache
    // bufLen will always fit into an int so this is safe
    int nbytes = stream.read(buf, 0, (int) Math.min(len, (long) bufLen));
    if (nbytes == -1) {
      // Source exhausted before reaching pos; remember so future calls return early.
      foundEOF = true;
      return length;
    }
    // Grow the cache file, then append the newly read bytes and advance the counters.
    cache.setLength(cache.length() + nbytes);
    cache.write(buf, 0, nbytes);
    len -= nbytes;
    length += nbytes;
  }
  return pos;
}
/**
 * Add the given text string to the end of a given file.
 *
 * <p>Note: {@code writeBytes} writes the low-order byte of each char, so non-Latin-1
 * characters are truncated (preserved original behavior).
 *
 * @param inStr The string to be added.
 * @param fileStr the name of the file to be added.
 * @throws Exception if the file cannot be opened or written
 */
public static void addStringToFile(String inStr, String fileStr) throws Exception {
  // try-with-resources: the original leaked the handle when seek/writeBytes threw.
  try (RandomAccessFile raFile = new RandomAccessFile(fileStr, "rw")) {
    raFile.seek(raFile.length());
    raFile.writeBytes(inStr);
  }
}
/**
 * Reads the full contents of the installation file as a String using the platform default
 * charset.
 *
 * @param installation the installation-id file to read
 * @return the file contents decoded with the platform default charset
 * @throws IOException if the file cannot be opened or fully read
 */
private String readInstallationFileBad(File installation) throws IOException {
  // try-with-resources: the original leaked the handle when readFully threw.
  try (RandomAccessFile f = new RandomAccessFile(installation, "r")) {
    byte[] bytes = new byte[(int) f.length()];
    f.readFully(bytes);
    return new String(bytes);
  }
}
/**
 * Opens the redo log file and reads its header.
 * The file is opened read-only when {@code mReadOnly} is set, otherwise read-write. On
 * success the header-read flag is set and the file size at open time is recorded.
 *
 * @throws IOException if the file cannot be opened or the header cannot be read
 */
public synchronized void open() throws IOException {
  mRAF = new RandomAccessFile(mFile, mReadOnly ? "r" : "rw");
  mIN = new RedoLogInput(mRAF, mFile.getPath());
  mHeader.read(mRAF);
  mHeaderRead = true;
  mFileSizeAtOpen = mRAF.length();
}
/**
 * Returns the length of the backing image in bytes, or -1 when it cannot be determined.
 */
public long length() {
  long result;
  try {
    result = image.length();
  } catch (IOException e) {
    // Treat an unreadable image as having unknown length.
    result = -1L;
  }
  return result;
}
/**
 * Randomly reads a section of the file's content and writes it to {@code System.out}.
 * Reading starts 4 bytes into the file when the file is longer than 4 bytes.
 *
 * @param fileName path of the file to read
 */
public static void readFileByRandomAccess(String fileName) {
  RandomAccessFile randomFile = null;
  try {
    log.debug("随机读取一段文件内容:");
    // Open a random-access file stream in read-only mode.
    randomFile = new RandomAccessFile(fileName, "r");
    // File length in bytes.
    long fileLength = randomFile.length();
    // Starting position for the read: skip the first 4 bytes when the file is long enough.
    int beginIndex = (fileLength > 4) ? 4 : 0;
    // Move the read position to beginIndex.
    randomFile.seek(beginIndex);
    byte[] bytes = new byte[10];
    int byteread = 0;
    // Read 10 bytes at a time (fewer when less than 10 bytes remain), assigning the
    // number of bytes actually read to byteread, until EOF.
    while ((byteread = randomFile.read(bytes)) != -1) {
      System.out.write(bytes, 0, byteread);
    }
  } catch (IOException e) {
    log.error("", e.fillInStackTrace());
  } finally {
    if (randomFile != null) {
      try {
        randomFile.close();
      } catch (IOException e) {
        log.error("", e.fillInStackTrace());
      }
    }
  }
}
/**
 * Scans the data path and replays every message stored in the data files.
 * Each file is memory-mapped and decoded as a sequence of [4-byte length][payload] records,
 * starting at {@code pos} (the resume offset from the meta file) for the first file and at 0
 * for the rest; decoded messages are decompressed and printed.
 *
 * <p>NOTE(review): {@code map.position((int) pos)} truncates pos to int — resuming inside a
 * file larger than 2 GiB would land at the wrong offset; confirm file sizes stay below that.
 *
 * @throws IOException if a file cannot be read or mapped
 */
private static void parseData() throws IOException {
  TreeSet<String> fs = scanPath();
  parseMeta();
  if (fs.size() == 0) return;
  // If the meta file does not refer to the first data file, the saved resume offset is
  // meaningless; restart from the beginning of the first file.
  String absolutePath = new File(fs.first()).getCanonicalPath();
  String absolutePath2 = new File(name).getCanonicalPath();
  if (!absolutePath.equals(absolutePath2)) {
    System.out.println("meta file name not match first file, use first file");
    pos = 0;
  }
  for (String filename : fs) {
    RandomAccessFile f = new RandomAccessFile(filename, "r");
    MappedByteBuffer map = f.getChannel().map(MapMode.READ_ONLY, 0, f.length());
    map.position((int) pos);
    while (map.hasRemaining()) {
      // Record layout: a 4-byte length prefix followed by the message payload.
      int size = map.getInt();
      byte[] c = new byte[size];
      map.get(c);
      Message m = MessageFactory.getInstance().createMessageFrom(c);
      m.decompress();
      System.out.println("get content: " + asString(m));
    }
    // Drop the mapping reference; actual unmapping is left to the GC.
    map.clear();
    map = null;
    f.close();
    // Only the first file uses the saved offset; subsequent files start at 0.
    pos = 0;
  }
}
/**
 * Flush the file, computing the hash if necessary. After this method completes the hash
 * obtained in {@link #getHeader()} will be valid, but the file will not be closed. If you want
 * the file to be closed, use {@link #close()} instead, which invokes this method.
 *
 * <p><b>Warning</b>: Because of the hash computation this method can be costly! After calling
 * this the internal hash computation must be completely reset, so a subsequent write will
 * cause the hash to be updated from scratch. Use caution with this method. In fact, this is
 * the reason this method is not named {@code flush}.
 *
 * @throws IOException An error occurred writing the file.
 */
public void finish() throws IOException {
  if (!_open) return;

  // Save the current position so we can restore it later.
  long pos = _backing.getFilePointer();

  // If the hash is not valid, compute it now.
  if (!_hashvalid) {
    // The hash is not valid. Complete the computation now and store
    // the resulting hash.
    // NOTE(review): seeking to EOF before _updateDigest suggests the digest is fed
    // incrementally as data is written and only finalized here — confirm.
    _backing.seek(_backing.length());
    _updateDigest();
    _head.hash = _digest.digest();
    _hashvalid = true;

    // Reset the digest now to force it to be re-computed next time.
    // This must be done since we just "used up" the existing digest
    // instance.
    _resetDigest();
  }

  // Write the header (including the freshly computed hash) to the backing store.
  _backing.seek(PicoStructure.HEAD_START);
  _backing.write(_head.putHeader());

  // Restore the file position.
  _backing.seek(pos);
}
/**
 * Reads the last {@code nb} fixed-size entries from the data file.
 * Each entry occupies DATA_LEN bytes and is read as a 4-byte int timestamp followed by a
 * 4-byte float value. When the file holds fewer than {@code nb} entries, only the available
 * ones are returned.
 *
 * <p>Because each entry is inserted at index 0 while the file is read forward, the returned
 * list has the most recently read (newest) entry first.
 *
 * @param nb maximum number of entries to read from the end of the file
 * @return the entries, newest first
 * @throws IOException if the file cannot be opened or read
 */
public List<Entry> getLastPoints(int nb) throws IOException {
  File file = getFile();
  RandomAccessFile raf = null;
  List<Entry> result = null;
  try {
    raf = new RandomAccessFile(file, "r");
    // Position at the first of the last nb entries; clamp when the file is shorter.
    long pos = raf.length() - (nb * DATA_LEN);
    if (pos < 0) {
      // Reduce nb by the number of missing entries (pos is negative here).
      nb = nb + (int) (pos / DATA_LEN);
      pos = 0;
    }
    raf.seek(pos);
    result = new ArrayList<Entry>(nb);
    Entry next = null;
    for (int i = 0; i < nb; i++) {
      next = new Entry();
      // Timestamp is stored as a 4-byte int; widen to long.
      next.timestamp = (long) raf.readInt();
      next.value = raf.readFloat();
      // Insert at the front, reversing the on-disk order.
      result.add(0, next);
    }
  } finally {
    if (raf != null) raf.close();
  }
  return result;
}