/** * Test stream type detection based on stream content. * * <p>Tests three things with the input text: * * <p>1) conversion if input was declared as text * * <p>2) conversion if input was declared as potentially text (AUTO_...) and is in fact text * * <p>3) conversion if modified input (now with binary characters) was declared as potentially * text but now contains binary characters * * <p> * * @param streamTypeText is the enum meaning that the input is definitely text (no binary check at * all) * @param streamTypeWithBinaryCheck is the enum meaning that the input may be text (binary check * is done) * @param input is a text input without binary characters * @param expectedConversion is the expected converted input without binary characters * @throws Exception */ private void testCheckin( EolStreamType streamTypeText, EolStreamType streamTypeWithBinaryCheck, String input, String expectedConversion) throws Exception { byte[] inputBytes = input.getBytes(StandardCharsets.UTF_8); byte[] expectedConversionBytes = expectedConversion.getBytes(StandardCharsets.UTF_8); // test using input text and assuming it was declared TEXT try (InputStream in = EolStreamTypeUtil.wrapInputStream(new ByteArrayInputStream(inputBytes), streamTypeText)) { byte[] b = new byte[1024]; int len = IO.readFully(in, b, 0); assertArrayEquals(expectedConversionBytes, Arrays.copyOf(b, len)); } // test using input text and assuming it was declared AUTO, using binary // detection try (InputStream in = EolStreamTypeUtil.wrapInputStream( new ByteArrayInputStream(inputBytes), streamTypeWithBinaryCheck)) { byte[] b = new byte[1024]; int len = IO.readFully(in, b, 0); assertArrayEquals(expectedConversionBytes, Arrays.copyOf(b, len)); } // now pollute input text with some binary bytes inputBytes = extendWithBinaryData(inputBytes); expectedConversionBytes = extendWithBinaryData(expectedConversionBytes); // again, test using input text and assuming it was declared TEXT try (InputStream in = 
EolStreamTypeUtil.wrapInputStream(new ByteArrayInputStream(inputBytes), streamTypeText)) { byte[] b = new byte[1024]; int len = IO.readFully(in, b, 0); assertArrayEquals(expectedConversionBytes, Arrays.copyOf(b, len)); } // again, test using input text and assuming it was declared AUTO, using // binary // detection try (InputStream in = EolStreamTypeUtil.wrapInputStream( new ByteArrayInputStream(inputBytes), streamTypeWithBinaryCheck)) { byte[] b = new byte[1024]; int len = IO.readFully(in, b, 0); // expect no conversion assertArrayEquals(inputBytes, Arrays.copyOf(b, len)); } }
/**
 * Reads the {@code description} file from the named repository's Git directory.
 *
 * @param name the project to look up.
 * @return the trimmed description text, or {@code null} if the file is missing, is empty
 *     after trimming, or equals the {@code UNNAMED} placeholder.
 * @throws RepositoryNotFoundException the repository could not be opened.
 * @throws IOException the description file exists but could not be read.
 */
public String getProjectDescription(final Project.NameKey name)
    throws RepositoryNotFoundException, IOException {
  // try-with-resources replaces the manual try/finally close; Repository is AutoCloseable.
  try (Repository e = openRepository(name)) {
    final File d = new File(e.getDirectory(), "description");
    String description;
    try {
      description = RawParseUtils.decode(IO.readFully(d));
    } catch (FileNotFoundException err) {
      // No description file at all: treat as no description.
      return null;
    }
    if (description != null) {
      description = description.trim();
      if (description.isEmpty()) {
        description = null;
      }
      if (UNNAMED.equals(description)) {
        // Git's stock placeholder text is treated as "no description".
        description = null;
      }
    }
    return description;
  }
}
/**
 * Verifies {@code DeltaStream} skip behavior: skipping to every offset still yields the
 * remaining bytes correctly, skipping to the end reports EOF, and skipping does not eagerly
 * open the base stream.
 *
 * @throws IOException on unexpected stream failure
 */
public void testSkip() throws IOException {
  // Build a delta via the test helpers; presumably init(32, 15) sizes base and delta
  // buffers — TODO confirm against the helper definitions elsewhere in this class.
  init(32, 15);
  copy(2, 2);
  insert("ab");
  insert("cd");
  copy(4, 4);
  copy(0, 2);
  insert("efg");
  assertValidState();

  // For every possible skip offset p: pre-fill the first p bytes, skip to p, then read the
  // rest and verify the reconstruction matches the expected data exactly.
  for (int p = 0; p < data.length; p++) {
    byte[] act = new byte[data.length];
    System.arraycopy(data, 0, act, 0, p);
    DeltaStream in = open();
    IO.skipFully(in, p);
    assertEquals(data.length - p, in.read(act, p, data.length - p));
    assertEquals(-1, in.read());
    assertTrue("skipping " + p, Arrays.equals(data, act));
  }

  // Skip all the way to the end should still recognize EOF.
  DeltaStream in = open();
  IO.skipFully(in, data.length);
  assertEquals(-1, in.read());
  assertEquals(0, in.skip(1));

  // Skip should not open the base as we move past it, but it
  // will open when we need to start copying data from it.
  final boolean[] opened = new boolean[1];
  in =
      new DeltaStream(new ByteArrayInputStream(delta)) {
        @Override
        protected long getBaseSize() throws IOException {
          return base.length;
        }

        @Override
        protected InputStream openBase() throws IOException {
          // Record that the base was actually opened.
          opened[0] = true;
          return new ByteArrayInputStream(base);
        }
      };
  IO.skipFully(in, 7);
  assertFalse("not yet open", opened[0]);
  // Reading a byte past the skip point forces the base open.
  assertEquals(data[7], in.read());
  assertTrue("now open", opened[0]);
}
/**
 * Reads the complete contents of a JAR entry into a byte array.
 *
 * @param entry the entry to read; its size must be known.
 * @return the entry's bytes.
 * @throws IOException if the entry size is unknown/invalid or the stream cannot be read.
 */
private byte[] read(JarEntry entry) throws IOException {
  final long sz = entry.getSize();
  // JarEntry.getSize() returns -1 when the size is unknown; the original code would have
  // thrown NegativeArraySizeException here. Fail with a descriptive IOException instead.
  if (sz < 0 || sz > Integer.MAX_VALUE) {
    throw new IOException("Invalid size " + sz + " for JAR entry " + entry.getName());
  }
  byte[] data = new byte[(int) sz];
  // try-with-resources guarantees the stream is closed, as the old try/finally did.
  try (InputStream in = jarFile.getInputStream(entry)) {
    IO.readFully(in, data, 0, data.length);
  }
  return data;
}
/**
 * Builds a {@code PathEdit} that stages the file at {@code workdir/path} as a regular-file
 * blob: the blob is inserted into the object database up front, and the returned edit stamps
 * the resulting id, mode and length onto the cache entry when applied.
 *
 * @param db repository to insert the blob into.
 * @param workdir working directory containing the file.
 * @param path repository-relative path of the file.
 * @return edit that updates a {@code DirCacheEntry} for {@code path}.
 * @throws FileNotFoundException the file does not exist.
 * @throws IOException the file could not be read.
 */
static PathEdit add(final Repository db, final File workdir, final String path)
    throws FileNotFoundException, IOException {
  // NOTE(review): the inserter is neither flushed nor released/closed here. Depending on the
  // JGit version this may leak resources or leave the inserted blob unflushed — verify
  // whether a try-with-resources plus flush() is needed at the call sites.
  ObjectInserter inserter = db.newObjectInserter();
  final File f = new File(workdir, path);
  final ObjectId id = inserter.insert(Constants.OBJ_BLOB, IO.readFully(f));
  return new PathEdit(path) {
    public void apply(DirCacheEntry ent) {
      ent.setFileMode(FileMode.REGULAR_FILE);
      // Length is taken from the file on disk at apply time, not at insert time.
      ent.setLength(f.length());
      ent.setObjectId(id);
    }
  };
}
/* (non-Javadoc)
 * @see org.eclipse.jgit.internal.storage.file.ReflogReaader#getReverseEntries(int)
 */
public List<ReflogEntry> getReverseEntries(int max) throws IOException {
  byte[] buf;
  try {
    buf = IO.readFully(logName);
  } catch (FileNotFoundException noLog) {
    if (logName.exists()) {
      // File vanished mid-read or is unreadable: propagate.
      throw noLog;
    }
    // No log file at all: no entries.
    return Collections.emptyList();
  }

  List<ReflogEntry> entries = new ArrayList<ReflogEntry>();
  // Walk the log backwards, one line per entry, until the start of the buffer
  // or until max entries have been collected.
  for (int pos = RawParseUtils.prevLF(buf, buf.length); pos >= 0 && max-- > 0; ) {
    pos = RawParseUtils.prevLF(buf, pos);
    entries.add(new ReflogEntryImpl(buf, pos < 0 ? 0 : pos + 2));
  }
  return entries;
}
/**
 * Computes the object id of a work-tree blob entry, applying content cleaning (filters)
 * when it might be required. Returns {@code zeroid} when the entry cannot be opened or read.
 */
private byte[] idBufferBlob(final Entry e) {
  try {
    final InputStream is = e.openInputStream();
    if (is == null)
      // Entry has no readable content stream.
      return zeroid;
    try {
      state.initializeDigestAndReadBuffer();
      final long len = e.getLength();
      if (!mightNeedCleaning())
        // No filters could apply: hash the raw stream directly.
        return computeHash(is, len);
      if (len <= MAXIMUM_FILE_SIZE_TO_READ_FULLY) {
        // Small file: read it all into memory, clean it if it is not binary,
        // then hash the (possibly cleaned) buffer.
        ByteBuffer rawbuf = IO.readWholeStream(is, (int) len);
        byte[] raw = rawbuf.array();
        int n = rawbuf.limit();
        if (!isBinary(raw, n)) {
          rawbuf = filterClean(raw, n);
          raw = rawbuf.array();
          n = rawbuf.limit();
        }
        return computeHash(new ByteArrayInputStream(raw, 0, n), n);
      }
      if (isBinary(e))
        // Large binary file: never cleaned, hash as-is.
        return computeHash(is, len);
      // Large text file: stream it through the clean filter twice — first to learn the
      // canonical length (needed for the object header), then to hash the content.
      final long canonLen;
      final InputStream lenIs = filterClean(e.openInputStream());
      try {
        canonLen = computeLength(lenIs);
      } finally {
        safeClose(lenIs);
      }
      return computeHash(filterClean(is), canonLen);
    } finally {
      safeClose(is);
    }
  } catch (IOException err) {
    // Can't read the file? Don't report the failure either.
    return zeroid;
  }
}
/* (non-Javadoc)
 * @see org.eclipse.jgit.internal.storage.file.ReflogReaader#getReverseEntry(int)
 */
public ReflogEntry getReverseEntry(int number) throws IOException {
  if (number < 0) {
    throw new IllegalArgumentException();
  }

  byte[] buf;
  try {
    buf = IO.readFully(logName);
  } catch (FileNotFoundException noLog) {
    if (logName.exists()) {
      // File vanished mid-read or is unreadable: propagate.
      throw noLog;
    }
    // No log file: no such entry.
    return null;
  }

  // Walk the log backwards one line at a time; the number-th line from the
  // end is the requested entry.
  int current = 0;
  for (int pos = RawParseUtils.prevLF(buf, buf.length); pos >= 0; current++) {
    pos = RawParseUtils.prevLF(buf, pos);
    if (current == number) {
      return new ReflogEntryImpl(buf, pos < 0 ? 0 : pos + 2);
    }
  }
  return null;
}
/**
 * Load the configuration as a Git text style configuration file.
 *
 * <p>If the file does not exist, this configuration is cleared, and thus behaves the same as
 * though the file exists, but is empty.
 *
 * @throws IOException the file could not be read (but does exist).
 * @throws ConfigInvalidException the file is not a properly formatted configuration file.
 */
@Override
public void load() throws IOException, ConfigInvalidException {
  final FileSnapshot oldSnapshot = snapshot;
  // Capture the snapshot before reading, so the saved state is never newer than the
  // content we actually parsed.
  final FileSnapshot newSnapshot = FileSnapshot.save(getFile());
  try {
    final byte[] in = IO.readFully(getFile());
    final ObjectId newHash = hash(in);
    if (hash.equals(newHash)) {
      // Content unchanged: only refresh snapshot bookkeeping, skip re-parsing.
      if (oldSnapshot.equals(newSnapshot)) oldSnapshot.setClean(newSnapshot);
      else snapshot = newSnapshot;
    } else {
      final String decoded;
      // Strip a UTF-8 byte-order mark (EF BB BF) if present, and remember it so a
      // later save can write it back.
      if (in.length >= 3
          && in[0] == (byte) 0xEF
          && in[1] == (byte) 0xBB
          && in[2] == (byte) 0xBF) {
        decoded = RawParseUtils.decode(RawParseUtils.UTF8_CHARSET, in, 3, in.length);
        utf8Bom = true;
      } else {
        decoded = RawParseUtils.decode(in);
      }
      fromText(decoded);
      // Only commit snapshot and hash after a successful parse.
      snapshot = newSnapshot;
      hash = newHash;
    }
  } catch (FileNotFoundException noFile) {
    // Missing file behaves like an empty configuration.
    clear();
    snapshot = newSnapshot;
  } catch (IOException e) {
    final IOException e2 =
        new IOException(MessageFormat.format(JGitText.get().cannotReadFile, getFile()));
    e2.initCause(e);
    throw e2;
  } catch (ConfigInvalidException e) {
    throw new ConfigInvalidException(
        MessageFormat.format(JGitText.get().cannotReadFile, getFile()), e);
  }
}
/**
 * Loads a repository's description, preferring {@code gitweb.description} from the repo
 * config and falling back to the {@code description} file in the Git directory.
 *
 * @param repo the repository to describe.
 * @return the description, or {@code null} if absent or equal to the stock placeholder.
 * @throws IOException the config was invalid and no file-based description exists.
 */
private String loadDescriptionText(Repository repo) throws IOException {
  String desc = null;
  StoredConfig config = repo.getConfig();
  IOException configError = null;
  try {
    config.load();
    desc = config.getString("gitweb", null, "description");
  } catch (ConfigInvalidException e) {
    // Defer: the file-based description may still succeed.
    configError = new IOException(e);
  }
  if (desc == null) {
    File descFile = new File(repo.getDirectory(), "description");
    if (descFile.exists()) {
      // Decode explicitly as UTF-8. The previous new String(byte[]) used the platform
      // default charset; elsewhere this file is decoded via RawParseUtils (UTF-8).
      desc =
          new String(IO.readFully(descFile), java.nio.charset.StandardCharsets.UTF_8);
      if (DEFAULT_DESCRIPTION.equals(CharMatcher.whitespace().trimFrom(desc))) {
        // Git's stock placeholder counts as no description.
        desc = null;
      }
    } else if (configError != null) {
      throw configError;
    }
  }
  return desc;
}
/**
 * Reads a version-3 index fixture, checks every entry's path and flags, then writes the
 * cache back out and verifies the bytes round-trip unchanged.
 */
@Test
public void testReadWriteV3() throws Exception {
  final File file = pathOf("gitgit.index.v3");
  final DirCache dc = new DirCache(file, FS.DETECTED);
  dc.read();
  assertEquals(10, dc.getEntryCount());

  // Expected entries, in index order: path, skip-worktree flag, intent-to-add flag.
  final String[] paths = {
    "dir1/file1.txt",
    "dir2/file2.txt",
    "dir3/file3.txt",
    "dir3/file3a.txt",
    "dir4/file4.txt",
    "dir4/file4a.txt",
    "file.txt",
    "newdir1/newfile1.txt",
    "newdir1/newfile2.txt",
    "newfile.txt"
  };
  final boolean[] skipWorkTree = {
    false, true, false, true, true, false, true, false, false, false
  };
  final boolean[] intentToAdd = {
    false, false, false, false, false, false, false, true, true, true
  };
  for (int i = 0; i < paths.length; i++) {
    assertV3TreeEntry(i, paths[i], skipWorkTree[i], intentToAdd[i], dc);
  }

  // Round trip: serializing the cache must reproduce the fixture byte-for-byte.
  final ByteArrayOutputStream bos = new ByteArrayOutputStream();
  dc.writeTo(bos);
  final byte[] indexBytes = bos.toByteArray();
  final byte[] expectedBytes = IO.readFully(file);
  assertArrayEquals(expectedBytes, indexBytes);
}
/**
 * Reads exactly {@code len} bytes from the raw input stream and decodes them as a string
 * using the repository character set.
 */
private String readString(final int len) throws IOException {
  final byte[] buf = new byte[len];
  IO.readFully(rawIn, buf, 0, len);
  final String decoded = RawParseUtils.decode(Constants.CHARSET, buf, 0, len);
  return decoded;
}
/**
 * Re-reads a loose ref from disk, reusing the cached {@code ref} when the backing file is
 * unmodified. Returns {@code null} when the file is missing, empty, truncated, or not a
 * plausible reference.
 */
LooseRef scanRef(LooseRef ref, String name) throws IOException {
  final File path = fileFor(name);
  FileSnapshot currentSnapshot = null;
  if (ref != null) {
    currentSnapshot = ref.getSnapShot();
    if (!currentSnapshot.isModified(path))
      // File unchanged since the cached ref was read: reuse it.
      return ref;
    name = ref.getName();
  }
  // Refs are tiny; anything hitting this limit is treated as not-a-ref below.
  final int limit = 4096;
  final byte[] buf;
  // Snapshot before reading so the saved state is never newer than the bytes read.
  FileSnapshot otherSnapshot = FileSnapshot.save(path);
  try {
    buf = IO.readSome(path, limit);
  } catch (FileNotFoundException noFile) {
    if (path.exists() && path.isFile()) {
      // Exists but could not be opened: report the failure.
      throw noFile;
    }
    return null; // doesn't exist or no file; not a reference.
  }
  int n = buf.length;
  if (n == 0) return null; // empty file; not a reference.
  if (isSymRef(buf, n)) {
    if (n == limit) return null; // possibly truncated ref
    // trim trailing whitespace
    while (0 < n && Character.isWhitespace(buf[n - 1])) n--;
    if (n < 6) {
      // Too short to hold "ref: " plus a target name.
      String content = RawParseUtils.decode(buf, 0, n);
      throw new IOException(MessageFormat.format(JGitText.get().notARef, name, content));
    }
    // Target starts after the 5-byte "ref: " prefix.
    final String target = RawParseUtils.decode(buf, 5, n);
    if (ref != null && ref.isSymbolic() && ref.getTarget().getName().equals(target)) {
      // Same symbolic target as cached: mark clean and reuse.
      assert (currentSnapshot != null);
      currentSnapshot.setClean(otherSnapshot);
      return ref;
    }
    return newSymbolicRef(otherSnapshot, name, target);
  }
  if (n < OBJECT_ID_STRING_LENGTH)
    return null; // impossibly short object identifier; not a reference.
  final ObjectId id;
  try {
    id = ObjectId.fromString(buf, 0);
    if (ref != null && !ref.isSymbolic() && id.equals(ref.getTarget().getObjectId())) {
      // Same object id as cached: mark clean and reuse.
      assert (currentSnapshot != null);
      currentSnapshot.setClean(otherSnapshot);
      return ref;
    }
  } catch (IllegalArgumentException notRef) {
    // Content is not a valid hex object id: report it as not-a-ref, keeping the cause.
    while (0 < n && Character.isWhitespace(buf[n - 1])) n--;
    String content = RawParseUtils.decode(buf, 0, n);
    IOException ioException =
        new IOException(MessageFormat.format(JGitText.get().notARef, name, content));
    ioException.initCause(notRef);
    throw ioException;
  }
  return new LooseUnpeeled(otherSnapshot, name, id);
}
/**
 * Runs the first {@code n} bytes of {@code src} through the clean filter and returns the
 * filtered content as a buffer.
 */
private ByteBuffer filterClean(byte[] src, int n) throws IOException {
  return IO.readWholeStream(filterClean(new ByteArrayInputStream(src)), n);
}
/** Returns the captured standard output split into lines. */
public List<String> outLines() {
  final String text = out.toString();
  return IO.readLines(text);
}
/** Returns the captured standard error split into lines. */
public List<String> errLines() {
  final String text = err.toString();
  return IO.readLines(text);
}
/**
 * Reads the response entity's content and returns it decoded and trimmed.
 *
 * @return the trimmed entity text.
 * @throws IOException the entity stream could not be read.
 */
public String getEntityContent() throws IOException {
  Preconditions.checkNotNull(response, "Response is not initialized.");
  Preconditions.checkNotNull(response.getEntity(), "Response.Entity is not initialized.");
  final ByteBuffer buf = IO.readWholeStream(response.getEntity().getContent(), 1024);
  final String decoded = RawParseUtils.decode(buf.array(), buf.arrayOffset(), buf.limit());
  return decoded.trim();
}
/**
 * Parses a DIRC index file from {@code inStream}: verifies the header, loads every entry,
 * consumes any extensions, and validates the trailing SHA-1 footer against a digest of
 * everything read.
 */
private void readFrom(final InputStream inStream) throws IOException, CorruptObjectException {
  final BufferedInputStream in = new BufferedInputStream(inStream);
  // Digest accumulates every byte consumed; it is compared to the footer at the end.
  final MessageDigest md = Constants.newMessageDigest();

  // Read the index header and verify we understand it.
  //
  // hdr is 20 bytes so it can later hold extension headers and the footer; only the
  // first 12 bytes form the DIRC header (signature, version, entry count).
  final byte[] hdr = new byte[20];
  IO.readFully(in, hdr, 0, 12);
  md.update(hdr, 0, 12);
  if (!is_DIRC(hdr)) throw new CorruptObjectException(JGitText.get().notADIRCFile);
  final int ver = NB.decodeInt32(hdr, 4);
  boolean extended = false;
  // Only index versions 2 and 3 are supported; version 3 adds extended entry flags.
  if (ver == 3) extended = true;
  else if (ver != 2)
    throw new CorruptObjectException(
        MessageFormat.format(JGitText.get().unknownDIRCVersion, Integer.valueOf(ver)));
  entryCnt = NB.decodeInt32(hdr, 8);
  if (entryCnt < 0) throw new CorruptObjectException(JGitText.get().DIRCHasTooManyEntries);

  // Snapshot the live file so racily-clean entries can be smudged against its mtime.
  snapshot = FileSnapshot.save(liveFile);
  int smudge_s = (int) (snapshot.lastModified() / 1000);
  int smudge_ns = ((int) (snapshot.lastModified() % 1000)) * 1000000;

  // Load the individual file entries.
  //
  final int infoLength = DirCacheEntry.getMaximumInfoLength(extended);
  // All entries share one backing buffer; infoAt tracks the write position in it.
  final byte[] infos = new byte[infoLength * entryCnt];
  sortedEntries = new DirCacheEntry[entryCnt];

  final MutableInteger infoAt = new MutableInteger();
  for (int i = 0; i < entryCnt; i++)
    sortedEntries[i] = new DirCacheEntry(infos, infoAt, in, md, smudge_s, smudge_ns);

  // After the file entries are index extensions, and then a footer.
  //
  for (; ; ) {
    // Peek 20 bytes plus one: if exactly 20 remain, they are the footer, not an
    // extension header.
    in.mark(21);
    IO.readFully(in, hdr, 0, 20);
    if (in.read() < 0) {
      // No extensions present; the file ended where we expected.
      //
      break;
    }

    // More data follows, so hdr holds an extension header: 4-byte name + 4-byte size.
    in.reset();
    md.update(hdr, 0, 8);
    IO.skipFully(in, 8);

    long sz = NB.decodeUInt32(hdr, 4);
    switch (NB.decodeInt32(hdr, 0)) {
      case EXT_TREE:
        {
          if (Integer.MAX_VALUE < sz) {
            throw new CorruptObjectException(
                MessageFormat.format(
                    JGitText.get().DIRCExtensionIsTooLargeAt,
                    formatExtensionName(hdr),
                    Long.valueOf(sz)));
          }
          final byte[] raw = new byte[(int) sz];
          IO.readFully(in, raw, 0, raw.length);
          md.update(raw, 0, raw.length);
          tree = new DirCacheTree(raw, new MutableInteger(), null);
          break;
        }
      default:
        if (hdr[0] >= 'A' && hdr[0] <= 'Z') {
          // The extension is optional and is here only as
          // a performance optimization. Since we do not
          // understand it, we can safely skip past it, after
          // we include its data in our checksum.
          //
          skipOptionalExtension(in, md, hdr, sz);
        } else {
          // The extension is not an optimization and is
          // _required_ to understand this index format.
          // Since we did not trap it above we must abort.
          //
          throw new CorruptObjectException(
              MessageFormat.format(
                  JGitText.get().DIRCExtensionNotSupportedByThisVersion,
                  formatExtensionName(hdr)));
        }
    }
  }

  // At loop exit hdr holds the final 20 bytes of the file — the stored SHA-1 footer —
  // which must match the digest of everything read before it.
  readIndexChecksum = md.digest();
  if (!Arrays.equals(readIndexChecksum, hdr)) {
    throw new CorruptObjectException(JGitText.get().DIRCChecksumMismatch);
  }
}