@Test public void testEqualsWithWhitespace() { final RawText a = new RawText(Constants.encodeASCII("foo-a\n \n a b c\na \n foo\na b c\n")); final RawText b = new RawText(Constants.encodeASCII("foo-a b\n\nab c\na\nfoo\na b c \n")); // "foo-a" != "foo-a b" assertFalse(cmp.equals(a, 0, b, 0)); assertFalse(cmp.equals(b, 0, a, 0)); // " " == "" assertTrue(cmp.equals(a, 1, b, 1)); assertTrue(cmp.equals(b, 1, a, 1)); // " a b c" != "ab c" assertFalse(cmp.equals(a, 2, b, 2)); assertFalse(cmp.equals(b, 2, a, 2)); // "a " == "a" assertTrue(cmp.equals(a, 3, b, 3)); assertTrue(cmp.equals(b, 3, a, 3)); // " foo" != "foo" assertFalse(cmp.equals(a, 4, b, 4)); assertFalse(cmp.equals(b, 4, a, 4)); // "a b c" == "a b c " assertTrue(cmp.equals(a, 5, b, 5)); assertTrue(cmp.equals(b, 5, a, 5)); }
/**
 * Assert that hashing {@code bin} as a canonical git object produces
 * {@code id}.
 *
 * <p>Recomputes the SHA-1 of {@code "<type> <length>\0<data>"} using the
 * type taken from {@code id} and compares the digest against the id the
 * object was parsed under.
 *
 * @param id parsed object whose identity and type are being verified.
 * @param bin raw content body of the object, without the header.
 */
private static void assertHash(RevObject id, byte[] bin) {
  final MessageDigest hasher = Constants.newMessageDigest();
  // Canonical header: "<type> <decimal length>\0".
  hasher.update(Constants.encodedTypeString(id.getType()));
  hasher.update((byte) ' ');
  hasher.update(Constants.encodeASCII(bin.length));
  hasher.update((byte) 0);
  hasher.update(bin);
  final ObjectId computed = ObjectId.fromRaw(hasher.digest());
  assertEquals(id, computed);
}
/**
 * Serialize this index to the given output stream.
 *
 * <p>Writes the DIRC header, every entry in sorted order, the optional
 * cache-tree extension, and finally a SHA-1 checksum over everything
 * written. The stream is closed when done.
 *
 * @param os stream to write the index to; closed by this method.
 * @throws IOException the stream could not be written.
 */
void writeTo(final OutputStream os) throws IOException {
  // All output flows through the digest stream so the trailing
  // checksum covers every byte written before it.
  final MessageDigest foot = Constants.newMessageDigest();
  final DigestOutputStream dos = new DigestOutputStream(os, foot);

  // Version 3 is required only if any entry uses the extended flags word.
  boolean extended = false;
  for (int i = 0; i < entryCnt; i++)
    extended |= sortedEntries[i].isExtended();

  // Write the header.
  //
  final byte[] tmp = new byte[128];
  System.arraycopy(SIG_DIRC, 0, tmp, 0, SIG_DIRC.length);
  NB.encodeInt32(tmp, 4, extended ? 3 : 2);
  NB.encodeInt32(tmp, 8, entryCnt);
  dos.write(tmp, 0, 12);

  // Write the individual file entries.
  final int smudge_s;
  final int smudge_ns;
  if (myLock != null) {
    // For new files we need to smudge the index entry
    // if they have been modified "now". Ideally we'd
    // want the timestamp when we're done writing the index,
    // so we use the current timestamp as an approximation.
    myLock.createCommitSnapshot();
    snapshot = myLock.getCommitSnapshot();
    smudge_s = (int) (snapshot.lastModified() / 1000);
    smudge_ns = ((int) (snapshot.lastModified() % 1000)) * 1000000;
  } else {
    // Used in unit tests only
    smudge_ns = 0;
    smudge_s = 0;
  }

  // Check if tree is non-null here since calling updateSmudgedEntries
  // will automatically build it via creating a DirCacheIterator
  final boolean writeTree = tree != null;

  if (repository != null && entryCnt > 0)
    updateSmudgedEntries();

  for (int i = 0; i < entryCnt; i++) {
    final DirCacheEntry e = sortedEntries[i];
    // Entries modified in the same instant the index is written could
    // hide later changes; mark them so readers re-check content.
    if (e.mightBeRacilyClean(smudge_s, smudge_ns))
      e.smudgeRacilyClean();
    e.write(dos);
  }

  if (writeTree) {
    // Cache-tree extension: serialize the tree to a temporary buffer
    // first, since the extension header needs its exact length.
    final TemporaryBuffer bb = new TemporaryBuffer.LocalFile();
    tree.write(tmp, bb);
    bb.close();

    NB.encodeInt32(tmp, 0, EXT_TREE);
    NB.encodeInt32(tmp, 4, (int) bb.length());
    dos.write(tmp, 0, 8);
    bb.writeTo(dos, null);
  }

  // Footer: SHA-1 over all preceding bytes. Written to the raw stream
  // so the digest does not include itself.
  writeIndexChecksum = foot.digest();
  os.write(writeIndexChecksum);
  os.close();
}
/** * Save the configuration as a Git text style configuration file. * * <p><b>Warning:</b> Although this method uses the traditional Git file locking approach to * protect against concurrent writes of the configuration file, it does not ensure that the file * has not been modified since the last read, which means updates performed by other objects * accessing the same backing file may be lost. * * @throws IOException the file could not be written. */ public void save() throws IOException { final byte[] out; final String text = toText(); if (utf8Bom) { final ByteArrayOutputStream bos = new ByteArrayOutputStream(); bos.write(0xEF); bos.write(0xBB); bos.write(0xBF); bos.write(text.getBytes(RawParseUtils.UTF8_CHARSET.name())); out = bos.toByteArray(); } else { out = Constants.encode(text); } final LockFile lf = new LockFile(getFile(), fs); if (!lf.lock()) throw new LockFailedException(getFile()); try { lf.setNeedSnapshot(true); lf.write(out); if (!lf.commit()) throw new IOException(MessageFormat.format(JGitText.get().cannotCommitWriteTo, getFile())); } finally { lf.unlock(); } snapshot = lf.getCommitSnapshot(); hash = hash(out); // notify the listeners fireConfigChangedEvent(); }
/**
 * Update the dumb client server info files.
 *
 * <p>Regenerates {@code info/refs}, {@code packed-refs} and
 * {@code objects/info/packs} so that dumb transport clients (plain HTTP)
 * can discover refs and pack files without a smart server.
 *
 * @throws Exception the info files could not be written.
 */
public void updateServerInfo() throws Exception {
  // Only file-based repositories have on-disk info files to refresh.
  if (db instanceof FileRepository) {
    final FileRepository fr = (FileRepository) db;
    // Route RefWriter's output through this repository's own
    // writeFile helper so files land inside fr's git directory.
    RefWriter rw = new RefWriter(fr.getAllRefs().values()) {
      @Override
      protected void writeFile(final String name, final byte[] bin)
          throws IOException {
        File path = new File(fr.getDirectory(), name);
        TestRepository.this.writeFile(path, bin);
      }
    };
    rw.writePackedRefs();
    rw.writeInfoRefs();

    // objects/info/packs: one "P <packfile name>" line per pack.
    final StringBuilder w = new StringBuilder();
    for (PackFile p : fr.getObjectDatabase().getPacks()) {
      w.append("P ");
      w.append(p.getPackFile().getName());
      w.append('\n');
    }
    writeFile(new File(new File(fr.getObjectDatabase().getDirectory(),
        "info"), "packs"), Constants.encodeASCII(w.toString()));
  }
}
/** * Send a message to the client, if it supports receiving them. * * <p>If the client doesn't support receiving messages, the message will be discarded, with no * other indication to the caller or to the client. * * @param what string describing the problem identified by the hook. The string must not end with * an LF, and must not contain an LF. */ public void sendMessage(final String what) { try { if (msgOut != null) msgOut.write(Constants.encode(what + "\n")); } catch (IOException e) { // Ignore write failures. } }
/** * Creates the version 1 pack bitmap index files. * * @param dst the output stream to which the index will be written. */ public PackBitmapIndexWriterV1(final OutputStream dst) { out = new DigestOutputStream( dst instanceof BufferedOutputStream ? dst : new SafeBufferedOutputStream(dst), Constants.newMessageDigest()); dataOutput = new SimpleDataOutput(out); }
public void setProjectDescription(final Project.NameKey name, final String description) { // Update git's description file, in case gitweb is being used // try { final Repository e; final LockFile f; e = openRepository(name); try { f = new LockFile(new File(e.getDirectory(), "description"), FS.DETECTED); if (f.lock()) { String d = description; if (d != null) { d = d.trim(); if (d.length() > 0) { d += "\n"; } } else { d = ""; } f.write(Constants.encode(d)); f.commit(); } } finally { e.close(); } } catch (RepositoryNotFoundException e) { log.error("Cannot update description for " + name, e); } catch (IOException e) { log.error("Cannot update description for " + name, e); } }
/**
 * Construct a new substring pattern.
 *
 * @param patternText text to locate. This should be a literal string, as
 *     no meta-characters are supported by this implementation. The string
 *     may not be the empty string.
 */
public RawSubStringPattern(final String patternText) {
  if (patternText.isEmpty())
    throw new IllegalArgumentException(
        JGitText.get().cannotMatchOnEmptyString);
  needleString = patternText;

  // Pre-lowercase the encoded needle so matching can be
  // case-insensitive without converting per comparison.
  final byte[] encoded = Constants.encode(patternText);
  needle = new byte[encoded.length];
  for (int i = 0; i < encoded.length; i++)
    needle[i] = lc(encoded[i]);
}
/**
 * Create an index entry for a fake regular file whose content is its own
 * path string.
 *
 * @param path path of the entry; also used as the blob content.
 * @return a new entry with mode REGULAR_FILE and a computed blob id.
 * @throws Exception the blob id could not be computed.
 */
private DirCacheEntry makeFile(final String path) throws Exception {
  final byte[] encoded = Constants.encode(path);
  final DirCacheEntry entry = new DirCacheEntry(path);
  entry.setFileMode(REGULAR_FILE);
  final ObjectWriter writer = new ObjectWriter(db);
  entry.setObjectId(writer.computeBlobSha1(encoded.length,
      new ByteArrayInputStream(encoded)));
  return entry;
}
/**
 * Create a fake iterator positioned on a single synthetic entry.
 *
 * @param pathName full path of the entry; everything after the last '/'
 *     becomes the entry name, the rest becomes the iterator prefix.
 * @param fileMode mode bits to report for the entry.
 */
public FakeTreeIterator(String pathName, FileMode fileMode) {
  super(prefix(pathName), new WorkingTreeOptions(AutoCRLF.FALSE));
  mode = fileMode.getBits();

  // Copy only the final path component into the shared path buffer.
  final int slash = pathName.lastIndexOf('/');
  final byte[] encodedName = Constants.encode(pathName.substring(slash + 1));
  ensurePathCapacity(pathOffset + encodedName.length, pathOffset);
  System.arraycopy(encodedName, 0, path, pathOffset, encodedName.length);
  pathLen = pathOffset + encodedName.length;
}
public void testEqualsWithoutWhitespace() { final RawText a = new RawText(cmp, Constants.encodeASCII("foo-a\nfoo-b\nfoo\n")); final RawText b = new RawText(cmp, Constants.encodeASCII("foo-b\nfoo-c\nf\n")); assertEquals(3, a.size()); assertEquals(3, b.size()); // foo-a != foo-b assertFalse(cmp.equals(a, 0, b, 0)); assertFalse(cmp.equals(b, 0, a, 0)); // foo-b == foo-b assertTrue(cmp.equals(a, 1, b, 0)); assertTrue(cmp.equals(b, 0, a, 1)); // foo != f assertFalse(cmp.equals(a, 2, b, 2)); assertFalse(cmp.equals(b, 2, a, 2)); }
/**
 * Describe this object for debugging: {@code "<type> <name> <flags>"}.
 */
@Override
public String toString() {
  final StringBuilder desc = new StringBuilder();
  desc.append(Constants.typeString(getType())).append(' ');
  desc.append(name()).append(' ');
  appendCoreFlags(desc);
  return desc.toString();
}
/** * Send an error message to the client. * * <p>If any error messages are sent before the references are advertised to the client, the * errors will be sent instead of the advertisement and the receive operation will be aborted. All * clients should receive and display such early stage errors. * * <p>If the reference advertisements have already been sent, messages are sent in a side channel. * If the client doesn't support receiving messages, the message will be discarded, with no other * indication to the caller or to the client. * * <p>{@link PreReceiveHook}s should always try to use {@link ReceiveCommand#setResult(Result, * String)} with a result status of {@link Result#REJECTED_OTHER_REASON} to indicate any reasons * for rejecting an update. Messages attached to a command are much more likely to be returned to * the client. * * @param what string describing the problem identified by the hook. The string must not end with * an LF, and must not contain an LF. */ public void sendError(final String what) { if (refs == null) { if (advertiseError == null) advertiseError = new StringBuilder(); advertiseError.append(what).append('\n'); } else { try { if (msgOut != null) msgOut.write(Constants.encode("error: " + what + "\n")); } catch (IOException e) { // Ignore write failures. } } }
static {
  // Reserved MS-DOS device names; paths using these (case-insensitively,
  // with or without an extension) are invalid on Windows.
  String[] list = new String[] { "AUX", "COM1", "COM2", "COM3", "COM4", //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$
      "COM5", "COM6", "COM7", "COM8", "COM9", "CON", "LPT1", "LPT2", //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$ //$NON-NLS-6$ //$NON-NLS-7$ //$NON-NLS-8$
      "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9", "NUL", //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$ //$NON-NLS-6$ //$NON-NLS-7$ //$NON-NLS-8$
      "PRN" }; //$NON-NLS-1$
  // Pre-encode to ASCII once so path checks can compare raw bytes.
  forbidden = new byte[list.length][];
  for (int i = 0; i < list.length; ++i)
    forbidden[i] = Constants.encodeASCII(list[i]);
}
/**
 * Reject the command: write the configured rejection message to the
 * client's error stream, close all streams, and exit with status 127.
 *
 * @param env SSH environment of the invocation (unused here).
 * @throws IOException the error stream could not be written.
 */
public void start(final Environment env) throws IOException {
  // Resolve the message inside this request's SSH scope so the
  // factory sees the correct per-connection context.
  Context old = sshScope.set(context);
  String message;
  try {
    message = messageFactory.get().getMessage();
  } finally {
    sshScope.set(old);
  }
  // message is already a String; the previous message.toString() call
  // was redundant.
  err.write(Constants.encode(message));
  err.flush();
  in.close();
  out.close();
  err.close();
  // 127: conventional "command not found" exit status.
  exit.onExit(127);
}
/**
 * Parse the canonical encoding of an annotated tag.
 *
 * <p>Expects the standard layout:
 * {@code "object <sha1>\ntype <type>\ntag <name>\n..."} and extracts the
 * referenced object and the tag name from it by fixed offsets.
 *
 * @param walk walk supplying the object lookup and shared id buffer.
 * @param rawTag complete canonical tag buffer.
 * @throws CorruptObjectException the type line could not be decoded.
 */
void parseCanonical(final RevWalk walk, final byte[] rawTag)
    throws CorruptObjectException {
  final MutableInteger pos = new MutableInteger();
  final int oType;

  // Offset 53 skips "object " (7) + 40-char sha1 + "\ntype " to land
  // on the type token.
  pos.value = 53; // "object $sha1\ntype "
  oType = Constants.decodeTypeString(this, rawTag, (byte) '\n', pos);
  // The referenced object id starts at offset 7, right after "object ".
  walk.idBuffer.fromString(rawTag, 7);
  object = walk.lookupAny(walk.idBuffer, oType);

  // decodeTypeString left pos after the type's '\n'; skip "tag " to
  // reach the tag name, which runs to the end of that line.
  int p = pos.value += 4; // "tag "
  final int nameEnd = RawParseUtils.nextLF(rawTag, p) - 1;
  tagName = RawParseUtils.decode(Constants.CHARSET, rawTag, p, nameEnd);

  // Keep the raw buffer only if the caller wants full bodies retained.
  if (walk.isRetainBody())
    buffer = rawTag;
  flags |= PARSED;
}
/** * Recursively get all entries within a subtree. * * @param path the subtree path to get all entries within. * @return all entries recursively contained within the subtree. */ public DirCacheEntry[] getEntriesWithin(String path) { if (path.length() == 0) { final DirCacheEntry[] r = new DirCacheEntry[sortedEntries.length]; System.arraycopy(sortedEntries, 0, r, 0, sortedEntries.length); return r; } if (!path.endsWith("/")) // $NON-NLS-1$ path += "/"; // $NON-NLS-1$ final byte[] p = Constants.encode(path); final int pLen = p.length; int eIdx = findEntry(p, pLen); if (eIdx < 0) eIdx = -(eIdx + 1); final int lastIdx = nextEntry(p, pLen, eIdx); final DirCacheEntry[] r = new DirCacheEntry[lastIdx - eIdx]; System.arraycopy(sortedEntries, eIdx, r, 0, r.length); return r; }
private String rand(final HttpServletRequest req, final String suffix) throws UnsupportedEncodingException { // Produce a random suffix that is difficult (or nearly impossible) // for an attacker to guess in advance. This reduces the risk that // an attacker could upload a *.class file and have us send a ZIP // that can be invoked through an applet tag in the victim's browser. // final MessageDigest md = Constants.newMessageDigest(); final byte[] buf = new byte[8]; NB.encodeInt32(buf, 0, req.getRemotePort()); md.update(req.getRemoteAddr().getBytes("UTF-8")); md.update(buf, 0, 4); NB.encodeInt64(buf, 0, System.currentTimeMillis()); md.update(buf, 0, 8); rng.nextBytes(buf); md.update(buf, 0, 8); return suffix + "-" + ObjectId.fromRaw(md.digest()).name(); }
/**
 * Read and parse the {@code packed-refs} file.
 *
 * <p>Retries a bounded number of times when the read fails with a stale
 * NFS file handle, re-snapshotting the file on each attempt.
 *
 * @return the parsed list plus the file snapshot and content hash taken
 *     at read time; {@link PackedRefList#NO_PACKED_REFS} if the file does
 *     not exist.
 * @throws IOException the file exists but could not be read or parsed.
 */
private PackedRefList readPackedRefs() throws IOException {
  int maxStaleRetries = 5;
  int retries = 0;
  while (true) {
    // Snapshot before opening so later modification checks cannot
    // miss a write that lands while we are reading.
    final FileSnapshot snapshot = FileSnapshot.save(packedRefsFile);
    final BufferedReader br;
    // Digest the raw bytes while reading so the list carries a
    // content id of exactly what was parsed.
    final MessageDigest digest = Constants.newMessageDigest();
    try {
      br = new BufferedReader(new InputStreamReader(
          new DigestInputStream(new FileInputStream(packedRefsFile),
              digest), CHARSET));
    } catch (FileNotFoundException noPackedRefs) {
      // Rethrow only if the file exists but could not be opened
      // (e.g. it is a directory or unreadable).
      if (packedRefsFile.exists()) {
        throw noPackedRefs;
      }
      // Ignore it and leave the new list empty.
      return PackedRefList.NO_PACKED_REFS;
    }
    try {
      return new PackedRefList(parsePackedRefs(br), snapshot,
          ObjectId.fromRaw(digest.digest()));
    } catch (IOException e) {
      // Stale NFS handles are transient; retry with a fresh open.
      if (FileUtils.isStaleFileHandle(e) && retries < maxStaleRetries) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(MessageFormat.format(
              JGitText.get().packedRefsHandleIsStale,
              Integer.valueOf(retries)), e);
        }
        retries++;
        continue;
      }
      throw e;
    } finally {
      br.close();
    }
  }
}
@Test
public void testRejectInvalidWindowsPaths() throws Exception {
  // On Unix "src/con.txt" is a legal path, so building the index works.
  // NOTE(review): SystemReader.setInstance swaps global state and is not
  // restored in a finally block — later tests in the same JVM may run
  // with the Windows reader; consider restoring the prior instance.
  SystemReader.setInstance(new MockSystemReader() {
    {
      setUnix();
    }
  });

  String path = "src/con.txt";
  DirCache dc = db.lockDirCache();
  DirCacheBuilder b = dc.builder();
  DirCacheEntry e = new DirCacheEntry(path);
  e.setFileMode(FileMode.REGULAR_FILE);
  try (ObjectInserter.Formatter formatter = new ObjectInserter.Formatter()) {
    e.setObjectId(formatter.idFor(Constants.OBJ_BLOB, Constants.encode(path)));
  }
  b.add(e);
  b.commit();
  db.readDirCache();

  // Re-reading the same index under Windows rules must fail: "con" is a
  // reserved device name regardless of extension.
  SystemReader.setInstance(new MockSystemReader() {
    {
      setWindows();
    }
  });

  try {
    db.readDirCache();
    fail("should have rejected " + path);
  } catch (CorruptObjectException err) {
    assertEquals(MessageFormat.format(JGitText.get().invalidPath, path),
        err.getMessage());
    assertNotNull(err.getCause());
    assertEquals("invalid name 'CON'", err.getCause().getMessage());
  }
}
/**
 * Locate the position a path's entry is at in the index.
 *
 * <p>If there is at least one entry in the index for this path the
 * position of the lowest stage is returned. Subsequent stages can be
 * identified by testing consecutive entries until the path differs.
 *
 * <p>If no path matches the entry -(position+1) is returned, where
 * position is the location it would have gone within the index.
 *
 * @param path the path to search for.
 * @return if &gt;= 0 then the return value is the position of the entry
 *     in the index; pass to {@link #getEntry(int)} to obtain the entry
 *     information. If &lt; 0 the entry does not exist in the index.
 */
public int findEntry(final String path) {
  // Delegate to the byte-array form used internally by the index.
  final byte[] encoded = Constants.encode(path);
  return findEntry(encoded, encoded.length);
}
/**
 * Read and parse an index file from the given stream.
 *
 * <p>Parses the 12-byte DIRC header, all file entries, any trailing index
 * extensions, and verifies the SHA-1 footer against a digest computed over
 * everything read.
 *
 * @param inStream stream positioned at the start of the index data.
 * @throws IOException the stream could not be read.
 * @throws CorruptObjectException the data is not a DIRC file, uses an
 *     unknown version or required extension, or fails the checksum.
 */
private void readFrom(final InputStream inStream) throws IOException,
    CorruptObjectException {
  final BufferedInputStream in = new BufferedInputStream(inStream);
  // Digest everything read so the trailing checksum can be verified.
  final MessageDigest md = Constants.newMessageDigest();

  // Read the index header and verify we understand it.
  //
  final byte[] hdr = new byte[20];
  IO.readFully(in, hdr, 0, 12);
  md.update(hdr, 0, 12);
  if (!is_DIRC(hdr))
    throw new CorruptObjectException(JGitText.get().notADIRCFile);
  final int ver = NB.decodeInt32(hdr, 4);
  boolean extended = false;
  // Only versions 2 and 3 are understood; 3 adds extended flags.
  if (ver == 3)
    extended = true;
  else if (ver != 2)
    throw new CorruptObjectException(MessageFormat.format(
        JGitText.get().unknownDIRCVersion, Integer.valueOf(ver)));
  entryCnt = NB.decodeInt32(hdr, 8);
  if (entryCnt < 0)
    throw new CorruptObjectException(JGitText.get().DIRCHasTooManyEntries);

  // Entries written in the same time-slot as this snapshot must be
  // treated as racily clean (seconds + nanoseconds granularity).
  snapshot = FileSnapshot.save(liveFile);
  int smudge_s = (int) (snapshot.lastModified() / 1000);
  int smudge_ns = ((int) (snapshot.lastModified() % 1000)) * 1000000;

  // Load the individual file entries.
  //
  final int infoLength = DirCacheEntry.getMaximumInfoLength(extended);
  final byte[] infos = new byte[infoLength * entryCnt];
  sortedEntries = new DirCacheEntry[entryCnt];

  final MutableInteger infoAt = new MutableInteger();
  for (int i = 0; i < entryCnt; i++)
    sortedEntries[i] = new DirCacheEntry(infos, infoAt, in, md, smudge_s, smudge_ns);

  // After the file entries are index extensions, and then a footer.
  //
  for (; ; ) {
    // Mark 21 bytes: 20 for a potential footer plus 1 lookahead byte
    // to detect end-of-file.
    in.mark(21);
    IO.readFully(in, hdr, 0, 20);
    if (in.read() < 0) {
      // No extensions present; the file ended where we expected.
      //
      break;
    }

    // More data follows, so hdr is an extension header, not the
    // footer: rewind and consume the 8 header bytes (4-byte name +
    // 4-byte length) into the digest.
    in.reset();
    md.update(hdr, 0, 8);
    IO.skipFully(in, 8);

    long sz = NB.decodeUInt32(hdr, 4);
    switch (NB.decodeInt32(hdr, 0)) {
    case EXT_TREE: {
      if (Integer.MAX_VALUE < sz) {
        throw new CorruptObjectException(MessageFormat.format(
            JGitText.get().DIRCExtensionIsTooLargeAt,
            formatExtensionName(hdr), Long.valueOf(sz)));
      }
      final byte[] raw = new byte[(int) sz];
      IO.readFully(in, raw, 0, raw.length);
      md.update(raw, 0, raw.length);
      tree = new DirCacheTree(raw, new MutableInteger(), null);
      break;
    }
    default:
      // By convention, extension names starting with an uppercase
      // letter are optional.
      if (hdr[0] >= 'A' && hdr[0] <= 'Z') {
        // The extension is optional and is here only as
        // a performance optimization. Since we do not
        // understand it, we can safely skip past it, after
        // we include its data in our checksum.
        //
        skipOptionalExtension(in, md, hdr, sz);
      } else {
        // The extension is not an optimization and is
        // _required_ to understand this index format.
        // Since we did not trap it above we must abort.
        //
        throw new CorruptObjectException(MessageFormat.format(
            JGitText.get().DIRCExtensionNotSupportedByThisVersion,
            formatExtensionName(hdr)));
      }
    }
  }

  // hdr still holds the 20-byte footer read just before the break;
  // it must equal the digest of all preceding bytes.
  readIndexChecksum = md.digest();
  if (!Arrays.equals(readIndexChecksum, hdr)) {
    throw new CorruptObjectException(JGitText.get().DIRCChecksumMismatch);
  }
}
/**
 * Encode a string pattern for faster matching on byte arrays.
 *
 * <p>Force the characters to our funny UTF-8 only convention that we use
 * on raw buffers. This avoids needing to perform character set decodes on
 * the individual commit buffers.
 *
 * @param patternText original pattern string supplied by the user or the
 *     application.
 * @return same pattern, but re-encoded to match our funny raw UTF-8
 *     character sequence {@link RawCharSequence}.
 */
protected static final String forceToRaw(final String patternText) {
  // Map each UTF-8 byte to the char of the same unsigned value.
  final byte[] utf8 = Constants.encode(patternText);
  final StringBuilder raw = new StringBuilder(utf8.length);
  for (final byte value : utf8)
    raw.append((char) (value & 0xff));
  return raw.toString();
}
/** * Walks a working directory tree as part of a {@link TreeWalk}. * * <p>Most applications will want to use the standard implementation of this iterator, {@link * FileTreeIterator}, as that does all IO through the standard <code>java.io</code> package. Plugins * for a Java based IDE may however wish to create their own implementations of this class to allow * traversal of the IDE's project space, as well as benefit from any caching the IDE may have. * * @see FileTreeIterator */ public abstract class WorkingTreeIterator extends AbstractTreeIterator { /** An empty entry array, suitable for {@link #init(Entry[])}. */ protected static final Entry[] EOF = {}; /** Size we perform file IO in if we have to read and hash a file. */ static final int BUFFER_SIZE = 2048; /** Maximum size of files which may be read fully into memory for performance reasons. */ private static final long MAXIMUM_FILE_SIZE_TO_READ_FULLY = 65536; /** Inherited state of this iterator, describing working tree, etc. */ private final IteratorState state; /** The {@link #idBuffer()} for the current entry. */ private byte[] contentId; /** Index within {@link #entries} that {@link #contentId} came from. */ private int contentIdFromPtr; /** List of entries obtained from the subclass. */ private Entry[] entries; /** Total number of entries in {@link #entries} that are valid. */ private int entryCnt; /** Current position within {@link #entries}. */ private int ptr; /** If there is a .gitignore file present, the parsed rules from it. */ private IgnoreNode ignoreNode; /** * Create a new iterator with no parent. * * @param options working tree options to be used */ protected WorkingTreeIterator(WorkingTreeOptions options) { super(); state = new IteratorState(options); } /** * Create a new iterator with no parent and a prefix. * * <p>The prefix path supplied is inserted in front of all paths generated by this iterator. 
It is * intended to be used when an iterator is being created for a subsection of an overall repository * and needs to be combined with other iterators that are created to run over the entire * repository namespace. * * @param prefix position of this iterator in the repository tree. The value may be null or the * empty string to indicate the prefix is the root of the repository. A trailing slash ('/') * is automatically appended if the prefix does not end in '/'. * @param options working tree options to be used */ protected WorkingTreeIterator(final String prefix, WorkingTreeOptions options) { super(prefix); state = new IteratorState(options); } /** * Create an iterator for a subtree of an existing iterator. * * @param p parent tree iterator. */ protected WorkingTreeIterator(final WorkingTreeIterator p) { super(p); state = p.state; } /** * Initialize this iterator for the root level of a repository. * * <p>This method should only be invoked after calling {@link #init(Entry[])}, and only for the * root iterator. * * @param repo the repository. */ protected void initRootIterator(Repository repo) { Entry entry; if (ignoreNode instanceof PerDirectoryIgnoreNode) entry = ((PerDirectoryIgnoreNode) ignoreNode).entry; else entry = null; ignoreNode = new RootIgnoreNode(entry, repo); } @Override public boolean hasId() { if (contentIdFromPtr == ptr) return true; return (mode & FileMode.TYPE_MASK) == FileMode.TYPE_FILE; } @Override public byte[] idBuffer() { if (contentIdFromPtr == ptr) return contentId; switch (mode & FileMode.TYPE_MASK) { case FileMode.TYPE_FILE: contentIdFromPtr = ptr; return contentId = idBufferBlob(entries[ptr]); case FileMode.TYPE_SYMLINK: // Java does not support symbolic links, so we should not // have reached this particular part of the walk code. 
// return zeroid; case FileMode.TYPE_GITLINK: // TODO: Support obtaining current HEAD SHA-1 from nested repository // return zeroid; } return zeroid; } private static final byte[] digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}; private static final byte[] hblob = Constants.encodedTypeString(Constants.OBJ_BLOB); private byte[] idBufferBlob(final Entry e) { try { final InputStream is = e.openInputStream(); if (is == null) return zeroid; try { state.initializeDigestAndReadBuffer(); final long len = e.getLength(); if (!mightNeedCleaning()) return computeHash(is, len); if (len <= MAXIMUM_FILE_SIZE_TO_READ_FULLY) { ByteBuffer rawbuf = IO.readWholeStream(is, (int) len); byte[] raw = rawbuf.array(); int n = rawbuf.limit(); if (!isBinary(raw, n)) { rawbuf = filterClean(raw, n); raw = rawbuf.array(); n = rawbuf.limit(); } return computeHash(new ByteArrayInputStream(raw, 0, n), n); } if (isBinary(e)) return computeHash(is, len); final long canonLen; final InputStream lenIs = filterClean(e.openInputStream()); try { canonLen = computeLength(lenIs); } finally { safeClose(lenIs); } return computeHash(filterClean(is), canonLen); } finally { safeClose(is); } } catch (IOException err) { // Can't read the file? Don't report the failure either. return zeroid; } } private static void safeClose(final InputStream in) { try { in.close(); } catch (IOException err2) { // Suppress any error related to closing an input // stream. We don't care, we should not have any // outstanding data to flush or anything like that. 
} } private boolean mightNeedCleaning() { switch (getOptions().getAutoCRLF()) { case FALSE: default: return false; case TRUE: case INPUT: return true; } } private boolean isBinary(byte[] content, int sz) { return RawText.isBinary(content, sz); } private boolean isBinary(Entry entry) throws IOException { InputStream in = entry.openInputStream(); try { return RawText.isBinary(in); } finally { safeClose(in); } } private ByteBuffer filterClean(byte[] src, int n) throws IOException { InputStream in = new ByteArrayInputStream(src); return IO.readWholeStream(filterClean(in), n); } private InputStream filterClean(InputStream in) { return new EolCanonicalizingInputStream(in); } /** * Returns the working tree options used by this iterator. * * @return working tree options */ public WorkingTreeOptions getOptions() { return state.options; } @Override public int idOffset() { return 0; } @Override public void reset() { if (!first()) { ptr = 0; if (!eof()) parseEntry(); } } @Override public boolean first() { return ptr == 0; } @Override public boolean eof() { return ptr == entryCnt; } @Override public void next(final int delta) throws CorruptObjectException { ptr += delta; if (!eof()) parseEntry(); } @Override public void back(final int delta) throws CorruptObjectException { ptr -= delta; parseEntry(); } private void parseEntry() { final Entry e = entries[ptr]; mode = e.getMode().getBits(); final int nameLen = e.encodedNameLen; ensurePathCapacity(pathOffset + nameLen, pathOffset); System.arraycopy(e.encodedName, 0, path, pathOffset, nameLen); pathLen = pathOffset + nameLen; } /** * Get the byte length of this entry. * * @return size of this file, in bytes. */ public long getEntryLength() { return current().getLength(); } /** * Get the last modified time of this entry. * * @return last modified time of this file, in milliseconds since the epoch (Jan 1, 1970 UTC). 
*/ public long getEntryLastModified() { return current().getLastModified(); } /** * Obtain an input stream to read the file content. * * <p>Efficient implementations are not required. The caller will usually obtain the stream only * once per entry, if at all. * * <p>The input stream should not use buffering if the implementation can avoid it. The caller * will buffer as necessary to perform efficient block IO operations. * * <p>The caller will close the stream once complete. * * @return a stream to read from the file. * @throws IOException the file could not be opened for reading. */ public InputStream openEntryStream() throws IOException { return current().openInputStream(); } /** * Determine if the current entry path is ignored by an ignore rule. * * @return true if the entry was ignored by an ignore rule file. * @throws IOException a relevant ignore rule file exists but cannot be read. */ public boolean isEntryIgnored() throws IOException { return isEntryIgnored(pathLen); } /** * Determine if the entry path is ignored by an ignore rule. * * @param pLen the length of the path in the path buffer. * @return true if the entry is ignored by an ignore rule. * @throws IOException a relevant ignore rule file exists but cannot be read. */ protected boolean isEntryIgnored(final int pLen) throws IOException { IgnoreNode rules = getIgnoreNode(); if (rules != null) { // The ignore code wants path to start with a '/' if possible. // If we have the '/' in our path buffer because we are inside // a subdirectory include it in the range we convert to string. 
// int pOff = pathOffset; if (0 < pOff) pOff--; String p = TreeWalk.pathOf(path, pOff, pLen); switch (rules.isIgnored(p, FileMode.TREE.equals(mode))) { case IGNORED: return true; case NOT_IGNORED: return false; case CHECK_PARENT: break; } } if (parent instanceof WorkingTreeIterator) return ((WorkingTreeIterator) parent).isEntryIgnored(pLen); return false; } private IgnoreNode getIgnoreNode() throws IOException { if (ignoreNode instanceof PerDirectoryIgnoreNode) ignoreNode = ((PerDirectoryIgnoreNode) ignoreNode).load(); return ignoreNode; } private static final Comparator<Entry> ENTRY_CMP = new Comparator<Entry>() { public int compare(final Entry o1, final Entry o2) { final byte[] a = o1.encodedName; final byte[] b = o2.encodedName; final int aLen = o1.encodedNameLen; final int bLen = o2.encodedNameLen; int cPos; for (cPos = 0; cPos < aLen && cPos < bLen; cPos++) { final int cmp = (a[cPos] & 0xff) - (b[cPos] & 0xff); if (cmp != 0) return cmp; } if (cPos < aLen) return (a[cPos] & 0xff) - lastPathChar(o2); if (cPos < bLen) return lastPathChar(o1) - (b[cPos] & 0xff); return lastPathChar(o1) - lastPathChar(o2); } }; static int lastPathChar(final Entry e) { return e.getMode() == FileMode.TREE ? '/' : '\0'; } /** * Constructor helper. * * @param list files in the subtree of the work tree this iterator operates on */ protected void init(final Entry[] list) { // Filter out nulls, . and .. as these are not valid tree entries, // also cache the encoded forms of the path names for efficient use // later on during sorting and iteration. 
// NOTE(review): this chunk begins inside a method whose header is outside this
// view; it appears to finish turning a raw directory listing into the sorted,
// filtered 'entries' array. The commented-out "entries = list;" is kept as found.
// entries = list;
int i, o;
final CharsetEncoder nameEncoder = state.nameEncoder;
// Compact the array in place: drop null slots, ".", "..", and the ".git"
// directory; remember a per-directory ".gitignore" (loaded lazily later);
// pre-encode each surviving name for byte-wise comparison.
for (i = 0, o = 0; i < entries.length; i++) {
	final Entry e = entries[i];
	if (e == null)
		continue; // unused slot in the raw listing
	final String name = e.getName();
	if (".".equals(name) || "..".equals(name))
		continue; // self/parent references are never part of the tree
	if (Constants.DOT_GIT.equals(name))
		continue; // repository metadata directory is never iterated
	if (Constants.DOT_GIT_IGNORE.equals(name))
		ignoreNode = new PerDirectoryIgnoreNode(e); // rules exist, not yet parsed
	if (i != o)
		entries[o] = e; // shift left over a removed entry
	e.encodeName(nameEncoder);
	o++;
}
entryCnt = o;
Arrays.sort(entries, 0, entryCnt, ENTRY_CMP);
contentIdFromPtr = -1;
ptr = 0;
if (!eof())
	parseEntry();
}

/**
 * Obtain the current entry from this iterator.
 *
 * @return the currently selected entry.
 */
protected Entry current() {
	return entries[ptr];
}

/**
 * Checks whether this entry differs from a given entry from the
 * {@link DirCache}.
 *
 * <p>File status information is used and if status is same we consider the
 * file identical to the state in the working directory. Native git uses more
 * stat fields than we have accessible in Java.
 *
 * @param entry
 *            the entry from the dircache we want to compare against
 * @param forceContentCheck
 *            True if the actual file content should be checked if
 *            modification time differs.
 * @return true if content is most likely different.
 */
public boolean isModified(DirCacheEntry entry, boolean forceContentCheck) {
	if (entry.isAssumeValid())
		return false; // user promised the file is unchanged

	if (entry.isUpdateNeeded())
		return true; // index explicitly marks this entry as stale

	// A smudged entry has an unreliable cached length, so only compare
	// lengths when the entry is not smudged.
	if (!entry.isSmudged() && (getEntryLength() != entry.getLength()))
		return true;

	// Determine difference in mode-bits of file and index-entry. In the
	// bitwise presentation of modeDiff we'll have a '1' when the two modes
	// differ at this position.
	int modeDiff = getEntryRawMode() ^ entry.getRawMode();
	// Ignore the executable file bits if checkFilemode tells me to do so.
	// Ignoring is done by setting the bits representing a EXECUTABLE_FILE
	// to '0' in modeDiff
	if (!state.options.isFileMode())
		modeDiff &= ~FileMode.EXECUTABLE_FILE.getBits();
	if (modeDiff != 0)
		// Report a modification if the modes still (after potentially
		// ignoring EXECUTABLE_FILE bits) differ
		return true;

	// Git under windows only stores seconds so we round the timestamp
	// Java gives us if it looks like the timestamp in index is seconds
	// only. Otherwise we compare the timestamp at millisecond precision.
	long cacheLastModified = entry.getLastModified();
	long fileLastModified = getEntryLastModified();
	if (cacheLastModified % 1000 == 0)
		fileLastModified = fileLastModified - fileLastModified % 1000;

	if (fileLastModified != cacheLastModified) {
		// The file is dirty by timestamps
		if (forceContentCheck) {
			// But we are told to look at content even though timestamps
			// tell us about modification
			return contentCheck(entry);
		} else {
			// We are told to assume a modification if timestamps differs
			return true;
		}
	} else {
		// The file is clean when you look at timestamps.
		if (entry.isSmudged()) {
			// The file is clean by timestamps but the entry was smudged.
			// Lets do a content check
			return contentCheck(entry);
		} else {
			// The file is clean by timestamps and the entry is not
			// smudged: Can't get any cleaner!
			return false;
		}
	}
}

/**
 * Compares the entries content with the content in the filesystem.
 * Unsmudges the entry when it is detected that it is clean.
 *
 * @param entry
 *            the entry to be checked
 * @return <code>true</code> if the content matches, <code>false</code>
 *         otherwise
 */
private boolean contentCheck(DirCacheEntry entry) {
	if (getEntryObjectId().equals(entry.getObjectId())) {
		// Content has not changed
		// We know the entry can't be racily clean because it's still clean.
		// Therefore we unsmudge the entry!
		// If by any chance we now unsmudge although we are still in the
		// same time-slot as the last modification to the index file the
		// next index write operation will smudge again.
		// Caution: we are unsmudging just by setting the length of the
		// in-memory entry object. It's the callers task to detect that we
		// have modified the entry and to persist the modified index.
		entry.setLength((int) getEntryLength());
		return false;
	} else {
		// Content differs: that's a real change!
		return true;
	}
}

/**
 * Count the number of bytes remaining in the stream.
 *
 * @param in
 *            stream to measure; consumed to EOF but not closed here.
 * @return total number of bytes skipped.
 * @throws IOException
 *             the stream could not be read.
 */
private long computeLength(InputStream in) throws IOException {
	// Since we only care about the length, use skip. The stream
	// may be able to more efficiently wade through its data.
	//
	long length = 0;
	for (; ; ) {
		long n = in.skip(1 << 20);
		if (n <= 0)
			break;
		length += n;
	}
	return length;
}

/**
 * Hash the stream content using git's blob object framing:
 * "blob" SP decimal-length NUL content.
 *
 * @param in
 *            content to hash; consumed but not closed here.
 * @param length
 *            expected number of content bytes.
 * @return the object id, or {@code zeroid} if the stream yielded a
 *         different number of bytes than {@code length} (file changed
 *         while being read).
 * @throws IOException
 *             the stream could not be read.
 */
private byte[] computeHash(InputStream in, long length) throws IOException {
	final MessageDigest contentDigest = state.contentDigest;
	final byte[] contentReadBuffer = state.contentReadBuffer;

	contentDigest.reset();
	contentDigest.update(hblob);
	contentDigest.update((byte) ' ');

	long sz = length;
	if (sz == 0) {
		contentDigest.update((byte) '0');
	} else {
		// Render the decimal length backwards into the tail of the buffer,
		// then feed only the used slice to the digest.
		final int bufn = contentReadBuffer.length;
		int p = bufn;
		do {
			contentReadBuffer[--p] = digits[(int) (sz % 10)];
			sz /= 10;
		} while (sz > 0);
		contentDigest.update(contentReadBuffer, p, bufn - p);
	}
	contentDigest.update((byte) 0);

	// sz is 0 again here; reuse it to count the bytes actually read so we
	// can detect a concurrent modification of the file.
	for (; ; ) {
		final int r = in.read(contentReadBuffer);
		if (r <= 0)
			break;
		contentDigest.update(contentReadBuffer, 0, r);
		sz += r;
	}
	if (sz != length)
		return zeroid;
	return contentDigest.digest();
}

/** A single entry within a working directory tree. */
protected abstract static class Entry {
	byte[] encodedName;

	int encodedNameLen;

	// Cache the UTF-8/charset-encoded form of the name for fast byte-wise
	// comparison against index entries.
	void encodeName(final CharsetEncoder enc) {
		final ByteBuffer b;
		try {
			b = enc.encode(CharBuffer.wrap(getName()));
		} catch (CharacterCodingException e) {
			// This should so never happen.
			throw new RuntimeException(MessageFormat.format(
					JGitText.get().unencodeableFile, getName()));
		}

		encodedNameLen = b.limit();
		if (b.hasArray() && b.arrayOffset() == 0)
			encodedName = b.array(); // use the encoder's array directly
		else
			b.get(encodedName = new byte[encodedNameLen]);
	}

	public String toString() {
		return getMode().toString() + " " + getName();
	}

	/**
	 * Get the type of this entry.
	 *
	 * <p><b>Note: Efficient implementation required.</b>
	 *
	 * <p>The implementation of this method must be efficient. If a subclass
	 * needs to compute the value they should cache the reference within an
	 * instance member instead.
	 *
	 * @return a file mode constant from {@link FileMode}.
	 */
	public abstract FileMode getMode();

	/**
	 * Get the byte length of this entry.
	 *
	 * <p><b>Note: Efficient implementation required.</b>
	 *
	 * <p>The implementation of this method must be efficient. If a subclass
	 * needs to compute the value they should cache the reference within an
	 * instance member instead.
	 *
	 * @return size of this file, in bytes.
	 */
	public abstract long getLength();

	/**
	 * Get the last modified time of this entry.
	 *
	 * <p><b>Note: Efficient implementation required.</b>
	 *
	 * <p>The implementation of this method must be efficient. If a subclass
	 * needs to compute the value they should cache the reference within an
	 * instance member instead.
	 *
	 * @return time since the epoch (in ms) of the last change.
	 */
	public abstract long getLastModified();

	/**
	 * Get the name of this entry within its directory.
	 *
	 * <p>Efficient implementations are not required. The caller will obtain
	 * the name only once and cache it once obtained.
	 *
	 * @return name of the entry.
	 */
	public abstract String getName();

	/**
	 * Obtain an input stream to read the file content.
	 *
	 * <p>Efficient implementations are not required. The caller will usually
	 * obtain the stream only once per entry, if at all.
	 *
	 * <p>The input stream should not use buffering if the implementation can
	 * avoid it. The caller will buffer as necessary to perform efficient
	 * block IO operations.
	 *
	 * <p>The caller will close the stream once complete.
	 *
	 * @return a stream to read from the file.
	 * @throws IOException
	 *             the file could not be opened for reading.
	 */
	public abstract InputStream openInputStream() throws IOException;
}

/** Magic type indicating we know rules exist, but they aren't loaded. */
private static class PerDirectoryIgnoreNode extends IgnoreNode {
	final Entry entry;

	PerDirectoryIgnoreNode(Entry entry) {
		// Start with no rules; load() parses them on demand.
		super(Collections.<IgnoreRule> emptyList());
		this.entry = entry;
	}

	// Parse the ".gitignore" entry; returns null when it contains no rules.
	IgnoreNode load() throws IOException {
		IgnoreNode r = new IgnoreNode();
		InputStream in = entry.openInputStream();
		try {
			r.parse(in);
		} finally {
			in.close();
		}
		return r.getRules().isEmpty() ? null : r;
	}
}

/** Magic type indicating there may be rules for the top level. */
private static class RootIgnoreNode extends PerDirectoryIgnoreNode {
	final Repository repository;

	RootIgnoreNode(Entry entry, Repository repository) {
		super(entry);
		this.repository = repository;
	}

	@Override
	IgnoreNode load() throws IOException {
		IgnoreNode r;
		if (entry != null) {
			// Root ".gitignore" first, if one exists.
			r = super.load();
			if (r == null)
				r = new IgnoreNode();
		} else {
			r = new IgnoreNode();
		}

		// Repository-wide excludes are appended after the root rules.
		File exclude = new File(repository.getDirectory(), "info/exclude");
		if (exclude.exists()) {
			FileInputStream in = new FileInputStream(exclude);
			try {
				r.parse(in);
			} finally {
				in.close();
			}
		}

		return r.getRules().isEmpty() ? null : r;
	}
}

/** Per-iteration scratch state shared by the iterator hierarchy. */
private static final class IteratorState {
	/** Options used to process the working tree. */
	final WorkingTreeOptions options;

	/** File name character encoder. */
	final CharsetEncoder nameEncoder;

	/** Digest computer for {@link #contentId} computations. */
	MessageDigest contentDigest;

	/** Buffer used to perform {@link #contentId} computations. */
	byte[] contentReadBuffer;

	IteratorState(WorkingTreeOptions options) {
		this.options = options;
		this.nameEncoder = Constants.CHARSET.newEncoder();
	}

	// Allocate the digest and read buffer only when content hashing is
	// actually needed; both are reused across entries.
	void initializeDigestAndReadBuffer() {
		if (contentDigest == null) {
			contentDigest = Constants.newMessageDigest();
			contentReadBuffer = new byte[BUFFER_SIZE];
		}
	}
}
}
/**
 * Lazily allocate the content digest and its read buffer.
 *
 * <p>Both objects are created together on first use and then reused; calling
 * this method again is a no-op.
 */
void initializeDigestAndReadBuffer() {
	if (contentDigest != null)
		return; // already set up on an earlier call
	contentDigest = Constants.newMessageDigest();
	contentReadBuffer = new byte[BUFFER_SIZE];
}
@Test
public void testFindOnEmpty() throws Exception {
	// Searching an empty in-core index must report "not found" (-1).
	final byte[] encodedPath = Constants.encode("a");
	final DirCache emptyCache = DirCache.newInCore();
	assertEquals(-1, emptyCache.findEntry(encodedPath, encodedPath.length));
}
/**
 * Encode the given text and insert the resulting bytes.
 *
 * @param text
 *            content to insert.
 * @throws IOException
 *             the underlying insertion failed.
 */
private void insert(String text) throws IOException {
	final byte[] encoded = Constants.encode(text);
	insert(encoded);
}
/**
 * Append the SHA-1 digest of the buffer's current content to the buffer.
 *
 * @param buf
 *            buffer whose content is hashed; the raw digest bytes are
 *            written to its end.
 * @throws IOException
 *             the buffer could not be written.
 */
private static void digest(TemporaryBuffer.Heap buf) throws IOException {
	final byte[] content = buf.toByteArray();
	final MessageDigest md = Constants.newMessageDigest();
	md.update(content);
	buf.write(md.digest());
}
/**
 * Compute the object id of the raw bytes.
 *
 * @param rawText
 *            content to hash.
 * @return id built from the digest of {@code rawText}.
 */
private static ObjectId hash(final byte[] rawText) {
	final byte[] raw = Constants.newMessageDigest().digest(rawText);
	return ObjectId.fromRaw(raw);
}