/**
 * Given the file offset of an IP country/region record, populate and return the shared
 * IPLocation object.
 *
 * @param offset start offset of the country record
 * @return the populated IPLocation, or null if an I/O error occurred
 */
private IPLocation getIPLocation(long offset) {
  try {
    // Skip the 4-byte IP field at the start of the record.
    ipFile.seek(offset + 4);
    // Read the first byte to see whether it is a redirect flag.
    byte b = ipFile.readByte();
    if (b == REDIRECT_MODE_1) {
      // Read the country offset and jump to it.
      long countryOffset = readLong3();
      ipFile.seek(countryOffset);
      // Check the flag byte again: the redirect target may itself be another redirect.
      b = ipFile.readByte();
      if (b == REDIRECT_MODE_2) {
        loc.setCountry(readString(readLong3()));
        ipFile.seek(countryOffset + 4);
      } else loc.setCountry(readString(countryOffset));
      // Read the area record that follows the country data.
      loc.setArea(readArea(ipFile.getFilePointer()));
    } else if (b == REDIRECT_MODE_2) {
      loc.setCountry(readString(readLong3()));
      loc.setArea(readArea(offset + 8));
    } else {
      // No redirect: the country string starts at the flag byte itself.
      loc.setCountry(readString(ipFile.getFilePointer() - 1));
      loc.setArea(readArea(ipFile.getFilePointer()));
    }
    return loc;
  } catch (IOException e) {
    // NOTE(review): I/O errors are silently mapped to null; callers must handle that.
    return null;
  }
}
/**
 * Read a file offset from the PST file. PST files store file offsets (pointers) in 4 (ANSI) or
 * 8 (other types) little-endian bytes; decode them into a long suitable for seeking.
 *
 * @param startOffset where in the file to read the offset bytes from
 * @return the decoded little-endian offset
 * @throws IOException if the bytes cannot be read (including premature end of file)
 */
protected long extractLEFileOffset(long startOffset) throws IOException {
  // ANSI files use 4-byte offsets; everything else uses 8-byte offsets.
  int width = (this.getPSTFileType() == PSTFile.PST_TYPE_ANSI) ? 4 : 8;
  in.seek(startOffset);
  byte[] temp = new byte[width];
  // readFully (rather than read) guarantees the whole value is read or an EOFException
  // is thrown; the previous code silently ignored short reads, yielding garbage offsets.
  in.readFully(temp);
  long offset = 0;
  // Little-endian: the most significant byte is stored last.
  for (int x = width - 1; x >= 0; x--) {
    offset = (offset << 8) | (temp[x] & 0xff);
  }
  return offset;
}
/** * Flush the file, computing the hash if necessary. After this method completes the hash obtained * in {@link #getHeader()} will be valid, but the file will not be closed. If you want the file to * be closed, use {@link #close()} instead, which invokes this method. * * <p><b>Warning</b>: Because of the hash computation this method can be costly! After calling * this the internal hash computation must be completely reset, so a subsequent write will cause * the hash to be updated from scratch. Use caution with this method. In fact, this is the reason * this method is not named {@code flush}. * * @throws IOException An error occurred writing the file. */ public void finish() throws IOException { if (!_open) return; // Save the current position so we can restore it later. long pos = _backing.getFilePointer(); // If the hash is not valid, compute it now. if (!_hashvalid) { // The hash is not valid. Complete the computation now and store // the resulting hash. _backing.seek(_backing.length()); _updateDigest(); _head.hash = _digest.digest(); _hashvalid = true; // Reset the digest now to force it to be re-computed next time. // This must be done since we just "used up" the existing digest // instance. _resetDigest(); } // Write the header to the backing store. _backing.seek(PicoStructure.HEAD_START); _backing.write(_head.putHeader()); // Restore the file position. _backing.seek(pos); }
/** * Stops the recording, and sets the state to STOPPED. In case of further usage, a reset is * needed. Also finalizes the wave file in case of uncompressed recording. */ public void stop() { if (state == State.RECORDING) { if (rUncompressed) { audioRecorder.stop(); try { randomAccessWriter.seek(4); // Write size to RIFF header randomAccessWriter.writeInt(Integer.reverseBytes(36 + payloadSize)); randomAccessWriter.seek(40); // Write size to Subchunk2Size field randomAccessWriter.writeInt(Integer.reverseBytes(payloadSize)); randomAccessWriter.close(); } catch (IOException e) { Log.e( ExtAudioRecorder.class.getName(), "I/O exception occured while closing output file"); state = State.ERROR; } } else { mediaRecorder.stop(); } state = State.STOPPED; } else { Log.e(ExtAudioRecorder.class.getName(), "stop() called on illegal state"); state = State.ERROR; } }
/**
 * Delete an index entry in the idx file of the specified column by flagging it as deleted.
 *
 * @param colname the column's name
 * @param idxPos index entry (line nr) to be deleted
 * @return elapsed time in milliseconds
 * @throws Exception if no index exists, the column is unknown, or the entry is not found
 */
protected long deleteIndexEntry(String colname, int idxPos) throws Exception {
  long start = System.currentTimeMillis();
  if (!indexExists(colname)) {
    throw new Exception("No index created");
  }
  int pkindex = def.getColPosition(colname);
  if (pkindex == -1) {
    throw new Exception("Column does not exist");
  }
  Integer[] size = def.getSizes();
  int recordSize = idxFixedRecordLength() + size[pkindex];
  // Use long arithmetic for the seek position so large index files do not overflow int.
  long recordStart = (long) idxPos * recordSize;
  indexFile.seek(recordStart);
  String sLine = indexFile.readLine();
  String[] parts = sLine.split("#");
  if (Integer.parseInt(parts[0].trim()) != idxPos) {
    throw new Exception("Index not found in index file");
  }
  // Overwrite the status byte (offset 6 inside the record) with the "deleted" flag.
  indexFile.seek(recordStart + 6);
  indexFile.write("D".getBytes());
  long end = System.currentTimeMillis();
  return end - start;
}
/**
 * Search for the byte pattern in mRAF, starting at the current file pointer. On success the file
 * pointer is left at the first byte of the match; on failure it is restored to where it was.
 *
 * @param pattern the byte sequence to look for
 * @return true if pattern was found, false if not found
 * @throws IOException if reading or seeking the file fails
 */
private boolean searchInRAF(byte[] pattern) throws IOException {
  int viewSize = 4 * 1024;
  if (pattern.length > viewSize) return false;
  byte view[] = new byte[viewSize * 2]; // double the size for pattern appearing on single view boundary
  long rafPos = mRAF.getFilePointer();
  // Align the window start on a viewSize boundary at or before the current position.
  long viewBaseOffset = rafPos - (rafPos % viewSize);
  mRAF.seek(viewBaseOffset);
  int startOffset = (int) (rafPos - viewBaseOffset);
  boolean atFileEnd = false;
  int bytesRead;
  while (!atFileEnd && (bytesRead = mRAF.read(view, 0, view.length)) != -1) {
    if (bytesRead < pattern.length) break;
    atFileEnd = viewBaseOffset + bytesRead >= mFileSizeAtOpen; // don't read past end of file at open time
    if (atFileEnd) bytesRead = (int) (mFileSizeAtOpen - viewBaseOffset);
    int endOffset = Math.min(view.length, bytesRead);
    int matchAt = searchByteArray(view, startOffset, endOffset, pattern);
    if (matchAt != -1) {
      // Found: position the pointer at the start of the match.
      mRAF.seek(viewBaseOffset + matchAt);
      return true;
    }
    // Advance by viewSize (not the full double-sized window) so the overlap
    // region is re-scanned, catching matches that straddle a boundary.
    viewBaseOffset += viewSize;
    mRAF.seek(viewBaseOffset);
    startOffset = 0;
  }
  mRAF.seek(rafPos); // move the pointer back to where we were before the failed search
  return false;
}
/**
 * Inserts a string into a file at the given byte offset, shifting the existing bytes after the
 * offset toward the end (the file grows by the inserted length).
 *
 * <p>Fix: the old finally block called {@code raf.close()} without a null check, throwing an NPE
 * when the file could not be opened; it also re-wrapped IOExceptions inside IOExceptions.
 * try-with-resources handles both.
 *
 * @param skip number of bytes to skip before inserting
 * @param str the string to insert (encoded with the platform default charset)
 * @param fileName path of the file to modify
 * @throws IOException if the file cannot be accessed or modified
 */
public static void writeSkip(long skip, String str, String fileName) throws IOException {
  try (RandomAccessFile raf = new RandomAccessFile(fileName, "rw")) {
    if (skip < 0 || skip > raf.length()) {
      System.out.println("跳过字节数无效");
      return;
    }
    byte[] b = str.getBytes();
    // Grow the file, then shift the tail [skip, oldLength) up by b.length bytes,
    // copying from the end backwards so no byte is overwritten before being moved.
    raf.setLength(raf.length() + b.length);
    for (long i = raf.length() - 1; i > b.length + skip - 1; i--) {
      raf.seek(i - b.length);
      byte temp = raf.readByte();
      raf.seek(i);
      raf.writeByte(temp);
    }
    // Finally write the new data into the gap that was opened up.
    raf.seek(skip);
    raf.write(b);
  }
}
/**
 * Reads the next delimiter-terminated line from the tailed file, transparently handling file
 * rotation and truncation.
 *
 * @return the next complete line (without the delimiter), or null if no full line is available yet
 * @throws IOException if the file cannot be read or reopened
 */
private String readLine() throws IOException {
  StringBuffer sb = new StringBuffer();
  char readChar;
  int ch;
  long pos = reader.getFilePointer();
  long length = file.length();
  // A shrinking file, or a same-size file modified since our last access, means the
  // file was rotated or truncated: reopen it and restart from the beginning.
  if ((length < pos) || (length == pos && FileUtils.isFileNewer(file, accessTime))) {
    // file got rotated or truncated
    reader.close();
    reader = new RandomAccessFile(file, "r");
    position = 0;
    reader.seek(position);
    pos = 0;
  }
  accessTime = System.currentTimeMillis();
  // Accumulate characters until the delimiter is seen.
  while ((ch = reader.read()) != -1) {
    readChar = (char) ch;
    if (readChar != delimiter) {
      sb.append(readChar);
    } else {
      return sb.toString();
    }
  }
  // No complete line yet: rewind so the partial data is re-read on the next call.
  reader.seek(pos);
  return null;
}
/**
 * Persists the bloom filter to disk.
 *
 * <p>If at least {@code seekThreshold} individual changes accumulated, the whole cache is
 * rewritten in one bulk write; otherwise only the changed bytes are patched individually.
 *
 * @throws IOException if I/O errors are encountered.
 */
public void flush() throws IOException {
  cacheLock.writeLock().lock();
  try {
    checkIfOpen();
    if (cacheDirty && unflushedChanges != null && file != null) {
      // Filter data starts after the metadata header.
      final int offset = this.metadata.getHeaderLength();
      // it's actually a disk-backed filter with changes
      if (unflushedChangeCounter.get() >= seekThreshold) {
        // Bulk path: rewrite the entire cache in one go and force it to disk.
        file.seek(offset);
        file.write(cache); // can probably be made more efficient
        file.getFD().sync();
      } else {
        // Sparse path: seek to and patch each changed byte at its own position.
        // NOTE(review): this path does not sync the file descriptor — confirm intended.
        for (Map.Entry<Integer, Byte> change : unflushedChanges.entrySet()) {
          file.seek(change.getKey() + offset);
          file.write(change.getValue());
        }
      }
      cacheDirty = false;
      unflushedChanges.clear();
      unflushedChangeCounter.set(0);
    }
  } finally {
    cacheLock.writeLock().unlock();
  }
}
/** * Reads data from the given file into the given buffer, centered around the given file offset. * The first half of the buffer will be filled with data right before the given offset, while the * remainder of the buffer will contain data right after it (of course, containing the byte at the * given offset). * * @param stream The stream to read from * @param buffer The buffer to read data into * @param fileReferenceOffset The offset to start reading from in the stream. * @return The number of bytes reads, which could be less than the length of the input buffer if * we can't read due to the beginning or the end of the file. * @throws IOException Thrown if the stream being used is invalid or inaccessible. */ private static int readIntoBufferAroundReference( RandomAccessFile stream, byte[] buffer, long fileReferenceOffset) throws IOException { int length = buffer.length; // calculate start offset long fileStartOffset = fileReferenceOffset - length / 2; if (fileStartOffset < 0) { // offset is less than zero, adjust it, as well as the length we want to read length += (int) fileStartOffset; fileStartOffset = 0; if (length <= 0) { return 0; } } if (fileStartOffset + length > stream.length()) { // startOffset + length is beyond the end of the stream, adjust the length accordingly length = (int) (stream.length() - fileStartOffset); if (length <= 0) { return 0; } } // read the appropriate block of the file into the buffer, using symmetry with respect to its // midpoint // we always initiate a seek from the origin of the file. stream.seek(0); stream.seek(fileStartOffset); int bufferOffset = 0; while (bufferOffset < length) { int bytesRead = stream.read(buffer, bufferOffset, length - bufferOffset); bufferOffset += bytesRead; } return length; }
private void writeBlock(String path, int id, byte data[]) throws IOException { int writer_id = (id / FILE_BLK_SIZE); int writer_blk_id = (id % FILE_BLK_SIZE); // Backfill the writer array if (writer_id >= fFileRWList.size()) { // Add a new database file File f = new File(fDBDir, (writer_id + 1) + ".db"); RandomAccessFile rw = new RandomAccessFile(f, "rw"); fFileRWList.add(rw); fLastRwBlkLen = 0; } RandomAccessFile rw = fFileRWList.get(writer_id); // See if the file is long enough if (writer_blk_id >= fLastRwBlkLen) { byte tmp[] = new byte[BLK_SIZE]; // Extend the length rw.seek(fLastRwBlkLen * BLK_SIZE); while (writer_blk_id >= fLastRwBlkLen) { rw.write(tmp); fLastRwBlkLen++; } } // Finally, seek to the block offset and write the block rw.seek(writer_blk_id * BLK_SIZE); rw.write(data); }
@Override public void flushBefore(long pos) throws IOException { long readFromPos = flushedPos; super.flushBefore(pos); long bytesToRead = pos - readFromPos; raf.seek(readFromPos); if (bytesToRead < MAX_BUFFER_LEN) { byte buffer[] = new byte[(int) bytesToRead]; raf.readFully(buffer); os.write(buffer); } else { byte buffer[] = new byte[MAX_BUFFER_LEN]; while (bytesToRead > 0) { int count = (int) Math.min(MAX_BUFFER_LEN, bytesToRead); raf.readFully(buffer, 0, count); os.write(buffer, 0, count); bytesToRead -= count; } } os.flush(); if (pos != streamPos) { raf.seek(streamPos); // Reset the position } }
/**
 * Appends an image to the given TIFF file.
 *
 * <p>If the file is empty this method writes the image directly; otherwise it appends the image
 * after the previous one and patches the IFD chain. <strong>Please note: This method supports
 * only Big endian tiff files</strong>
 *
 * @param img BufferedImage to append
 * @param fileName The name of the file where the image will be written
 * @throws IOException if the file is unreadable
 */
public void append(BufferedImage img, String fileName) throws IOException {
  File file = new File(fileName);
  if (file.exists() && file.length() > 0) {
    int endFile = (int) file.length();
    // NOTE(review): this pads by (length % 8) bytes, which does not round endFile up to
    // an 8-byte boundary ((8 - length % 8) % 8 would) — confirm the intended alignment.
    int padding = endFile % 8;
    RandomAccessFile rFile = new RandomAccessFile(fileName, "rw");
    rFile.seek(endFile);
    for (int i = 0; i < padding; i++) {
      rFile.write(0);
    }
    endFile += padding;
    // Point the previous image's "next IFD" pointer at the new image's location.
    alterLastIFDOffset(rFile, endFile);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    byte[] data = getComponentBytes(img);
    // More than 3 bytes per pixel implies an alpha channel is present.
    boolean hasAlpha = data.length > (img.getWidth() * img.getHeight() * 3);
    if (compress) {
      data = performCompression(data, img.getWidth(), img.getHeight(), hasAlpha);
    }
    writeContents(bos, data, img.getWidth(), img.getHeight(), endFile, hasAlpha, compress);
    bos.close();
    data = bos.toByteArray();
    rFile.seek(endFile);
    rFile.write(data);
    rFile.close();
  } else {
    // Empty or missing file: write a fresh single-image TIFF.
    BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(fileName));
    createImage(img, out, compress);
    out.close();
  }
}
/**
 * Sorts the 32-bit integers stored in the backing file in ascending order using an in-place,
 * on-disk bubble sort that swaps adjacent values.
 *
 * @return the number of swaps performed
 * @throws Exception if reading or writing the file fails
 */
public int bubbleSort() throws Exception {
  int swapCount = 0;
  int pass = 0;
  boolean changed = true;
  while (changed) {
    changed = false;
    pass++;
    // Each pass bubbles the largest remaining value toward the end; the last
    // 'pass' elements are already in place and are skipped.
    for (int i = 0; i < length() - pass; i++) {
      file.seek(i * 4);
      int first = file.readInt();
      int second = file.readInt();
      if (first > second) {
        // Write the pair back in swapped order.
        file.seek(i * 4);
        file.writeInt(second);
        file.writeInt(first);
        changed = true;
        swapCount++;
      }
    }
  }
  return swapCount;
}
public static void secureDelete(File file, Random random) throws IOException { // FIXME somebody who understands these things should have a look at this... if (!file.exists()) return; long size = file.length(); if (size > 0) { RandomAccessFile raf = null; try { System.out.println( "Securely deleting " + file + " which is of length " + size + " bytes..."); raf = new RandomAccessFile(file, "rw"); raf.seek(0); long count; byte[] buf = new byte[4096]; // First zero it out count = 0; while (count < size) { int written = (int) Math.min(buf.length, size - count); raf.write(buf, 0, written); count += written; } raf.getFD().sync(); // Then ffffff it out for (int i = 0; i < buf.length; i++) buf[i] = (byte) 0xFF; raf.seek(0); count = 0; while (count < size) { int written = (int) Math.min(buf.length, size - count); raf.write(buf, 0, written); count += written; } raf.getFD().sync(); // Then random data random.nextBytes(buf); raf.seek(0); count = 0; while (count < size) { int written = (int) Math.min(buf.length, size - count); raf.write(buf, 0, written); count += written; } raf.getFD().sync(); raf.seek(0); // Then 0's again for (int i = 0; i < buf.length; i++) buf[i] = 0; count = 0; while (count < size) { int written = (int) Math.min(buf.length, size - count); raf.write(buf, 0, written); count += written; } raf.getFD().sync(); raf.close(); raf = null; } finally { Closer.close(raf); } } if ((!file.delete()) && file.exists()) throw new IOException("Unable to delete file " + file); }
/**
 * Delete the FLAC tag from the file by rewriting it with an empty tag (no comment or picture
 * metadata blocks).
 *
 * @param raf the file to strip
 * @param tempRaf scratch file used while rewriting
 * @throws IOException if reading or writing fails
 * @throws CannotWriteException if the tag cannot be written
 */
public void delete(RandomAccessFile raf, RandomAccessFile tempRaf)
    throws IOException, CannotWriteException {
  // This will save the file without any Comment or PictureData blocks
  FlacTag emptyTag = new FlacTag(null, new ArrayList<MetadataBlockDataPicture>());
  // Both files are rewound so write() processes them from the start.
  raf.seek(0);
  tempRaf.seek(0);
  write(emptyTag, raf, tempRaf);
}
/**
 * Repositions the underlying file pointer, mirroring a COM IStream::Seek-style contract.
 *
 * @param offset byte offset relative to the origin selected by seekOrigin
 * @param seekOrigin STREAM_SEEK_SET (absolute) or STREAM_SEEK_CUR (relative to current position)
 * @return the new absolute file position
 * @throws java.io.IOException if seeking fails
 */
public long Seek(long offset, int seekOrigin) throws java.io.IOException {
  if (seekOrigin == STREAM_SEEK_SET) {
    _file.seek(offset);
  } else if (seekOrigin == STREAM_SEEK_CUR) {
    _file.seek(offset + _file.getFilePointer());
  }
  // NOTE(review): any other origin (e.g. a seek-from-end variant) is silently ignored and
  // the unchanged current position is returned — confirm this is intended.
  return _file.getFilePointer();
}
/**
 * Attempts to repair a region file whose chunk-offset table contains negative offsets: a .bak
 * copy is made first, the file is padded to a valid size, and offending offset entries are
 * zeroed so the chunks are regenerated.
 *
 * @param regionFileFile the corrupt region file
 * @return a RegionFile opened on the (possibly repaired) file
 */
private static RegionFile fixNegativeOffset(File regionFileFile) {
  FMLLog.log(
      Level.WARNING,
      "Region file " + regionFileFile + " is corrupted: negative offset. Attempting to fix.");
  // Best-effort backup before touching the file; failure is logged but not fatal.
  try {
    Files.copy(
        regionFileFile,
        new File(regionFileFile.getParentFile(), regionFileFile.getName() + ".bak"));
  } catch (IOException e) {
    FMLLog.log(Level.SEVERE, e, "Failed to back up corrupt region file.");
  }
  try {
    RandomAccessFile dataFile = new RandomAccessFile(regionFileFile, "rw");
    try {
      int length;
      // A file shorter than 4096 bytes is missing its two 1024-entry int tables
      // (chunk offsets and timestamps); write both out as zeros.
      if (dataFile.length() < 4096L) {
        for (length = 0; length < 1024; ++length) {
          dataFile.writeInt(0);
        }
        for (length = 0; length < 1024; ++length) {
          dataFile.writeInt(0);
        }
      }
      // Pad the file when its length is not a multiple of 4096.
      // NOTE(review): this writes (length & 4095) bytes at the current pointer, which does
      // not obviously round the length up to a sector boundary — verify against the format.
      if ((dataFile.length() & 4095L) != 0L) {
        for (length = 0; (long) length < (dataFile.length() & 4095L); ++length) {
          dataFile.write(0);
        }
      }
      length = (int) dataFile.length() / 4096;
      dataFile.seek(0L);
      // Scan the 1024 offset entries; each packs the sector start in the high 3 bytes
      // and the sector count in the low byte.
      for (int i = 0; i < 1024; ++i) {
        int offset = dataFile.readInt();
        if (offset != 0 && (offset >> 8) + (offset & 255) <= length) {
          // Zero the entry if any of its sectors has a negative start.
          // NOTE(review): the (offset >> 8) + var5 < 0 condition looks odd — verify.
          for (int var5 = 0; var5 < (offset & 255); ++var5) {
            if ((offset >> 8) + var5 < 0) {
              // Rewind over the entry we just read and clear it.
              dataFile.seek(dataFile.getFilePointer() - 4);
              dataFile.writeInt(0);
              break;
            }
          }
        }
      }
    } finally {
      dataFile.close();
    }
  } catch (Throwable t) {
    FMLLog.log(Level.SEVERE, t, "Failed to fix negative offset index in " + regionFileFile);
    throw UnsafeUtil.throwIgnoreChecked(t);
  }
  return new RegionFile(regionFileFile);
}
/** * 使用切割的方式来替换给定文件中的一段数据 * * @param file 给定的文件 * @param off 要替换的一段数据的开始位置(包括) * @param length 要替换的一段数据的长度,大于1 * @param newData 用来替换旧数据的新数据 * @throws IOException * @throws LengthTooBigException (fileLength - (off + length)) > 31457280 * 因为本方法采用的是先把需要替换的数据之后的数据读到内存中,然后将文件截短,最后把之前保存的数据写到文件中。因此读到内存中的数据不能太大 */ public static void replaceFileDataByCutWay(File file, long off, long length, byte[] newData) throws IOException, LengthTooBigException { // 获取文件长度 long fileLength = file.length(); // 验证数据合法性 CheckingUtils.valiLongValue(off, 0, fileLength - 1, "off"); CheckingUtils.valiLongValue(off + length, off + 1, fileLength, "length"); CheckingUtils.valiObjectIsNull(newData, "newData"); if (newData.length > 0) { // 计算需读到内存的数据的长度 long keepDataLength = fileLength - (off + length); // 如果需要读到内存的数据长度为0 if (keepDataLength == 0) { // 打开原文件 RandomAccessFile raf = new RandomAccessFile(file, "rw"); // 设置长度 raf.setLength(off); // 将新数据写到末尾去 raf.write(newData); // 关闭原文件 raf.close(); } else if (keepDataLength <= 31457280) { // 打开原文件 RandomAccessFile raf = new RandomAccessFile(file, "rw"); // 读取要保存的数据 byte[] keepData = new byte[(int) keepDataLength]; raf.seek(off + length); raf.read(keepData); // 将文件截掉合适的长度 if (length != 0) { raf.setLength(fileLength - length); } // 写入新数据 raf.seek(off); raf.write(newData); // 写入保存的数据 raf.write(keepData); // 关闭原文件 raf.close(); } else { throw new LengthTooBigException( "Need to read the length of data of the memory more than 30720 ((fileLength - (off + length)) > 30720)"); } } }
/**
 * Serializes one in-memory bucket (list[numOfDir][numOfFile]) into its .dat file, creating the
 * directory as needed and deleting empty files and directories.
 *
 * <p>File format: for each entry the UTF-8 key, a NUL byte, and a 4-byte slot that is later
 * patched with the int file offset of the value; all values follow at those offsets.
 */
private void writeIntoFile(int numOfDir, int numOfFile) {
  String dirString = String.valueOf(numOfDir) + ".dir";
  String fileString = String.valueOf(numOfFile) + ".dat";
  File dbDir = tableDir.toPath().resolve(dirString).normalize().toFile();
  if (!dbDir.isDirectory()) {
    dbDir.mkdir();
  }
  File dbFile = dbDir.toPath().resolve(fileString).normalize().toFile();
  // An empty bucket means the file (and possibly the directory) should be removed.
  if (list[numOfDir][numOfFile].isEmpty()) {
    dbFile.delete();
    if (dbDir.list().length == 0) {
      dbDir.delete();
    }
    return;
  }
  RandomAccessFile db;
  try {
    db = new RandomAccessFile(dbFile, "rw");
    try {
      // Truncate any previous contents before rewriting.
      db.setLength(0);
      Iterator<Map.Entry<String, String>> it;
      it = list[numOfDir][numOfFile].entrySet().iterator();
      // First pass: write the keys and remember where each 4-byte offset slot lives.
      long[] pointers = new long[list[numOfDir][numOfFile].size()];
      int counter = 0;
      while (it.hasNext()) {
        Map.Entry<String, String> m = (Map.Entry<String, String>) it.next();
        String key = m.getKey();
        db.write(key.getBytes("UTF-8"));
        db.write("\0".getBytes("UTF-8"));
        pointers[counter] = db.getFilePointer();
        // Skip over the offset slot; it is patched in the second pass.
        db.seek(pointers[counter] + 4);
        ++counter;
      }
      // Second pass: write each value and patch its offset back into the matching slot.
      it = list[numOfDir][numOfFile].entrySet().iterator();
      counter = 0;
      while (it.hasNext()) {
        Map.Entry<String, String> m = (Map.Entry<String, String>) it.next();
        String value = m.getValue();
        int curPointer = (int) db.getFilePointer();
        db.seek(pointers[counter]);
        db.writeInt(curPointer);
        db.seek(curPointer);
        db.write(value.getBytes("UTF-8"));
        ++counter;
      }
    } catch (Exception e) {
      // NOTE(review): the cause is wrapped here but ultimately discarded below.
      db.close();
      throw new Exception(e);
    }
    db.close();
    if (dbDir.list().length == 0) {
      dbDir.delete();
    }
  } catch (Exception e) {
    // NOTE(review): all failures surface as a bare IllegalArgumentException with no
    // cause attached — consider chaining the exception.
    throw new IllegalArgumentException();
  }
}
/**
 * Writes all necessary data for this entry: drains the deflater (DEFLATED), validates the
 * promised CRC and size (STORED to a plain stream), and back-patches the local file header when
 * writing through a random access file.
 *
 * @throws IOException if writing fails or the stored CRC/size do not match
 * @since 1.1
 */
public void closeEntry() throws IOException {
  if (entry == null) {
    return;
  }
  long realCrc = crc.getValue();
  crc.reset();
  if (entry.getMethod() == DEFLATED) {
    // Finish compression and record the actual sizes and CRC on the entry.
    def.finish();
    while (!def.finished()) {
      deflate();
    }
    entry.setSize(def.getTotalIn());
    entry.setComprSize(def.getTotalOut());
    entry.setCrc(realCrc);
    def.reset();
    written += entry.getCompressedSize();
  } else if (raf == null) {
    // STORED to a non-seekable stream: CRC and size were promised up front and must match.
    if (entry.getCrc() != realCrc) {
      throw new SwcException.BadCRC(Long.toHexString(entry.getCrc()), Long.toHexString(realCrc));
    }
    if (entry.getSize() != written - dataStart) {
      throw new SwcException.BadZipSize(
          entry.getName(), entry.getSize() + "", (written - dataStart) + "");
    }
  } else {
    /* method is STORED and we used RandomAccessFile */
    long size = written - dataStart;
    entry.setSize(size);
    entry.setComprSize(size);
    entry.setCrc(realCrc);
  }
  // If random access output, write the local file header containing
  // the correct CRC and compressed/uncompressed sizes
  if (raf != null) {
    long save = raf.getFilePointer();
    raf.seek(localDataStart);
    writeOut((new ZipLong(entry.getCrc())).getBytes());
    writeOut((new ZipLong(entry.getCompressedSize())).getBytes());
    writeOut((new ZipLong(entry.getSize())).getBytes());
    raf.seek(save);
  }
  writeDataDescriptor(entry);
  entry = null;
}
public static void main(String args[]) { try { String fname = "d:\\q.txt"; String mode; // mode = "r";//r : file must exist mode = "rw"; // rw : file will be created or opened // open the file RandomAccessFile raf = new RandomAccessFile(fname, mode); /* //seek and write demo raf.seek(10);//position file r/w pointer at index 10 wrt BOF //a seek beyond file size causes file to grow upto the seek value raf.write(65);//raf.write('A'); */ // r/w java datatypes int i1, i2; float f1, f2; char c1, c2; String s1, s2; i1 = -10; f1 = 1234.5678F; c1 = 'q'; s1 = "hello files"; raf.seek(0); // reach BOF raf.writeInt(i1); raf.writeFloat(f1); raf.writeChar(c1); raf.writeUTF(s1); raf.seek(0); // reach BOF i2 = raf.readInt(); f2 = raf.readFloat(); c2 = raf.readChar(); s2 = raf.readUTF(); System.out.println(i2); System.out.println(f2); System.out.println(c2); System.out.println(s2); // close the file raf.close(); } catch (IOException ex) { System.out.println(ex); // ex converts into ex.toString() } } // main
/**
 * Search the index file for a primary key value, following the hash bucket's linked list of
 * collision entries.
 *
 * <p>Record layout ('#'-separated fields): [0] index entry nr, [1] delete flag ("D" = deleted),
 * [2] key value, [3] table row, [4] next entry in the collision chain or "null".
 *
 * @param pkvalue the primary key value to search for
 * @param pos out-parameter: pos.index receives the index entry, pos.table the table row
 * @return true if the key was found in a non-deleted entry
 * @throws Exception if no index exists or the primary key column is missing
 */
public boolean searchIndex(String pkvalue, Positions pos) throws Exception {
  boolean found = false;
  boolean end = false;
  if (!indexExists(def.getPK())) {
    throw new Exception("No index created");
  }
  int pkindex = def.getColPosition(def.getPK());
  if (pkindex == -1) {
    throw new Exception("Primary key does not exist");
  }
  // calculate index = hash value of the trimmed key
  String s_value = pkvalue.trim();
  int index = hash(s_value);
  Integer[] size = def.getSizes();
  int recordSize = idxFixedRecordLength() + size[pkindex];
  indexFile.seek(index * recordSize);
  String line = indexFile.readLine();
  if (line.substring(0, 1).equals(" ")) {
    // Empty record, end of search
    found = false;
    return found;
  }
  String[] parts = line.split("#");
  String s_part = parts[2].trim();
  // A match only counts if the entry is not flagged as deleted ("D").
  if (s_part.equals(pkvalue) && !(parts[1].equals("D"))) {
    found = true;
    pos.index = Integer.parseInt(parts[0].trim());
    pos.table = Integer.parseInt(parts[3].trim());
  }
  // Walk the collision chain until the key is found or the chain ends.
  while (!found && !end) {
    if (parts[4].substring(0, 4).equals("null")) {
      // end of linked list
      end = true;
      found = false;
    } else {
      // Follow the link to the next record in the chain.
      index = Integer.parseInt(parts[4].trim());
      indexFile.seek(index * recordSize);
      line = indexFile.readLine();
      parts = line.split("#");
      if (parts[2].trim().equals(pkvalue) && !(parts[1].equals("D"))) {
        found = true;
        pos.index = Integer.parseInt(parts[0].trim());
        pos.table = Integer.parseInt(parts[3].trim());
      }
    }
  }
  return found;
}
/**
 * Close the I/O stream, first patching the header with the final row count and CRC.
 *
 * @exception IOException from library call
 */
public void close() throws IOException {
  // Only a file that actually has a header contains the fixed slots to patch.
  if (headerSize > 0) {
    // write number of rows, CRC
    outStream.seek(NUMROWS_OFFSET);
    outStream.writeLong(numRows);
    // The CRC occupies the last CRC_SIZE bytes of the header.
    outStream.seek(headerSize - CRC_SIZE);
    outStream.writeInt(CRC);
  }
  outStream.close();
}
/**
 * Closes the output file. MUST be called to have a correct stream: the RIFF and data chunk
 * length fields can only be patched once the total size is known.
 *
 * @exception IOException if there was an exception closing the Audio Writer.
 */
public void close() throws IOException {
  /* update the total file length field from RIFF chunk (file length minus the 8-byte RIFF header) */
  raf.seek(4);
  int fileLength = (int) raf.length() - 8;
  writeInt(raf, fileLength);
  /* update the data chunk length size (at fixed offset 40 in the canonical WAVE header) */
  raf.seek(40);
  writeInt(raf, size);
  /* close the output file */
  raf.close();
}
/**
 * Spawns the remote database server in a separate JVM and blocks until it signals readiness.
 *
 * <p>Coordination happens through a shared "mutex" file: this method writes 0 to its first byte,
 * the child writes 1 once the server is up, and this method polls that byte every 5 seconds.
 * (Method name typo "spwan" is kept for caller compatibility.)
 *
 * @throws Exception if the process cannot be started or the wait is interrupted
 */
public void spwanServer() throws Exception {
  OGlobalConfiguration.WAL_FUZZY_CHECKPOINT_INTERVAL.setValue(5);
  String buildDirectory = System.getProperty("buildDirectory", ".");
  buildDirectory += "/uniqueIndexCrashRestore";
  buildDir = new File(buildDirectory);
  // NOTE(review): File.delete() fails silently on a non-empty directory — confirm that a
  // recursive delete is not required here.
  if (buildDir.exists()) buildDir.delete();
  buildDir.mkdir();
  final File mutexFile = new File(buildDir, "mutex.ct");
  final RandomAccessFile mutex = new RandomAccessFile(mutexFile, "rw");
  // Initialize the readiness flag to 0 (not started).
  mutex.seek(0);
  mutex.write(0);
  buildDirectory = buildDir.getCanonicalPath();
  buildDir = new File(buildDirectory);
  String javaExec = System.getProperty("java.home") + "/bin/java";
  javaExec = new File(javaExec).getCanonicalPath();
  System.setProperty("ORIENTDB_HOME", buildDirectory);
  // Launch the server JVM with the same classpath and the shared mutex file path.
  ProcessBuilder processBuilder =
      new ProcessBuilder(
          javaExec,
          "-Xmx2048m",
          "-XX:MaxDirectMemorySize=512g",
          "-classpath",
          System.getProperty("java.class.path"),
          "-DORIENTDB_HOME=" + buildDirectory,
          "-DmutexFile=" + mutexFile.getCanonicalPath(),
          RemoteDBRunner.class.getName());
  processBuilder.inheritIO();
  serverProcess = processBuilder.start();
  System.out.println(
      IndexCrashRestoreSingleValueIT.class.getSimpleName() + ": Wait for server start");
  // Poll the flag byte until the child reports it is up.
  boolean started = false;
  do {
    Thread.sleep(5000);
    mutex.seek(0);
    started = mutex.read() == 1;
  } while (!started);
  mutex.close();
  mutexFile.delete();
  System.out.println(
      IndexCrashRestoreSingleValueIT.class.getSimpleName() + ": Server was started");
}
/** * Provides wrapped access to the managed random access file. Use only this method, and not the * attribute reference directly. Calling this method will transparently ensure, that the returned * random access file references an open instance which is intelligently reset to the previous * file pointer position if necessary (lazy seeking) * * @return the managed random access file handle for internal use * @throws IOException */ protected synchronized RandomAccessFile raf() throws IOException { if (isOpen == false) { reOpen(); // restore previous file pointer position raf.seek(currentFilePointer); } else { // file was open; check if file pointer has been modified, // i.e. if a seek is necessary if (raf.getFilePointer() != currentFilePointer) { raf.seek(currentFilePointer); } } return raf; }
/**
 * Reads up to {@code length} bytes from the given RandomAccessFile starting at the given offset.
 * The file is not closed and its file pointer is restored before returning.
 *
 * <p>Fixes: a single {@code read} may return fewer bytes than requested (now looped), and a read
 * at or past end of file returned -1, which previously caused {@code new byte[-1]} to throw a
 * NegativeArraySizeException — an empty array is returned instead.
 *
 * @param raf the file to read from; left open, pointer restored
 * @param off the offset to start reading at
 * @param length the maximum number of bytes to read
 * @return the bytes actually read; shorter than {@code length} (possibly empty) near end of file
 * @throws IOException if seeking or reading fails
 */
public static byte[] readByte(RandomAccessFile raf, long off, int length) throws IOException {
  long lastPointer = raf.getFilePointer();
  raf.seek(off);
  byte[] bytes = new byte[length];
  // Fill the buffer with repeated reads until full or EOF.
  int number = 0;
  while (number < length) {
    int n = raf.read(bytes, number, length - number);
    if (n < 0) break; // end of file reached
    number += n;
  }
  raf.seek(lastPointer);
  if (number == length) {
    return bytes;
  }
  // Short read (EOF): return only the bytes actually read (empty if offset was at/past EOF).
  byte[] result = new byte[number];
  System.arraycopy(bytes, 0, result, 0, number);
  return result;
}
/**
 * Parses the zip64 end of central directory record locator. The locator must be placed
 * immediately before the end of central directory (eocd) record starting at {@code eocdOffset}.
 *
 * <p>The position of the file cursor for {@code raf} after a call to this method is undefined
 * and callers must reposition it after each call to this method.
 *
 * @param raf the archive to read from
 * @param eocdOffset offset of the EOCD record the locator would immediately precede
 * @return the zip64 EOCD record offset, or -1 if no locator signature is found
 * @throws IOException if reading fails, or if the archive spans multiple disks
 */
public static long parseZip64EocdRecordLocator(RandomAccessFile raf, long eocdOffset)
    throws IOException {
  // The spec stays curiously silent about whether a zip file with an EOCD record,
  // a zip64 locator and a zip64 eocd record is considered "empty". In our implementation,
  // we parse all records and read the counts from them instead of drawing any size or
  // layout based information.
  if (eocdOffset > ZIP64_LOCATOR_SIZE) {
    raf.seek(eocdOffset - ZIP64_LOCATOR_SIZE);
    // The signature is stored little-endian on disk; readInt is big-endian, hence reverseBytes.
    if (Integer.reverseBytes(raf.readInt()) == ZIP64_LOCATOR_SIGNATURE) {
      // Read the remainder of the locator (everything after the 4-byte signature).
      byte[] zip64EocdLocator = new byte[ZIP64_LOCATOR_SIZE - 4];
      raf.readFully(zip64EocdLocator);
      ByteBuffer buf = ByteBuffer.wrap(zip64EocdLocator).order(ByteOrder.LITTLE_ENDIAN);
      final int diskWithCentralDir = buf.getInt();
      final long zip64EocdRecordOffset = buf.getLong();
      final int numDisks = buf.getInt();
      if (numDisks != 1 || diskWithCentralDir != 0) {
        throw new ZipException("Spanned archives not supported");
      }
      return zip64EocdRecordOffset;
    }
  }
  return -1;
}
/**
 * Loads this page's metadata fields (next page id, current fill, bloom filter, type) from the
 * page's fixed offset in the backing page file. Field order matches the on-disk layout.
 *
 * @throws IOException if seeking or reading the page file fails
 */
public void loadMetaData() throws IOException {
  pageFile.seek(offset);
  nextPageId = pageFile.readInt();
  currentFill = pageFile.readInt();
  bloomfilter = pageFile.readInt();
  type = pageFile.readByte();
}