private void updateHead(final Commit commit) throws IOException {
    // Move the hive branch to the new commit...
    final RefUpdate refUpdate = repository.updateRef(branchName);
    refUpdate.setNewObjectId(commit.getCommitId());
    refUpdate.setRefLogMessage(commit.getMessage(), false);
    refUpdate.forceUpdate();

    // ...and keep HEAD in sync with it.
    final RefUpdate headUpdate = repository.updateRef(Constants.HEAD);
    headUpdate.setNewObjectId(commit.getCommitId());
    headUpdate.setRefLogMessage(commit.getMessage(), false);
    headUpdate.forceUpdate();
}
/**
 * Create a hive instance at the specified file location.
 *
 * @param location path to the (bare) git directory where the hive is located
 * @param create flag to create the hive, or open an existing hive
 */
public Hive(final File location, final boolean create) {
    try {
        branchName = "refs/heads/swarmdb";
        repository = new Repository(location);
        if (create) {
            repository.create();
            createVersionInfo();
            repository.writeSymref(Constants.HEAD, branchName);
        }
        // TODO: check version compatibility
    } catch (final Exception e) {
        throw new SwarmdbException(
                "Unable to create git repository at " + location.getAbsolutePath(), e);
    }
}
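/*
 * Hypothetical usage sketch for the constructor above. The directory
 * path and the helper name are assumptions for illustration, not part
 * of the hive API: create the hive when the directory does not exist
 * yet, otherwise open the existing one.
 */
public static Hive openOrCreate() {
    final File hiveDir = new File("/var/data/swarmdb.git"); // assumed path
    return new Hive(hiveDir, !hiveDir.exists());
}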
@Override
protected IStatus run(IProgressMonitor monitor) {
    try {
        // A repository can contain many projects, so only scan each
        // repository once. (A project could in theory be distributed
        // among many repositories; we discard that as being ugly and
        // stupid for the moment.)
        IProject[] projects = ResourcesPlugin.getWorkspace().getRoot().getProjects();
        monitor.beginTask("Scanning Git repositories for changes", projects.length);
        Set<Repository> scanned = new HashSet<Repository>();
        for (IProject p : projects) {
            RepositoryMapping mapping = RepositoryMapping.getMapping(p);
            if (mapping != null) {
                Repository r = mapping.getRepository();
                if (!scanned.contains(r)) {
                    if (monitor.isCanceled())
                        break;
                    trace("Scanning " + r + " for changes");
                    scanned.add(r);
                    ISchedulingRule rule = p.getWorkspace().getRuleFactory().modifyRule(p);
                    getJobManager().beginRule(rule, monitor);
                    try {
                        r.scanForRepoChanges();
                    } finally {
                        getJobManager().endRule(rule);
                    }
                }
            }
            monitor.worked(1);
        }
        monitor.done();
        trace("Rescheduling " + getName() + " job");
        schedule(REPO_SCAN_INTERVAL);
    } catch (Exception e) {
        trace("Stopped rescheduling " + getName() + " job");
        return new Status(
                IStatus.ERROR,
                getPluginId(),
                0,
                "An error occurred while scanning for changes. Scanning aborted",
                e);
    }
    return Status.OK_STATUS;
}
/**
 * Open new transport instances to connect two repositories.
 *
 * @param local existing local repository.
 * @param remote location of the remote repository - may be a URI or a remote
 *            configuration name.
 * @param op planned use of the returned Transport; the URI may differ based on
 *            the type of connection desired.
 * @return the list of new transport instances, one for every URI in the remote
 *         configuration.
 * @throws URISyntaxException the location is not a remote defined in the
 *             configuration file and is not a well-formed URL.
 * @throws NotSupportedException the protocol specified is not supported.
 */
public static List<Transport> openAll(
        final Repository local, final String remote, final Operation op)
        throws NotSupportedException, URISyntaxException {
    final RemoteConfig cfg = new RemoteConfig(local.getConfig(), remote);
    if (doesNotExist(cfg)) {
        final ArrayList<Transport> transports = new ArrayList<Transport>(1);
        transports.add(open(local, new URIish(remote)));
        return transports;
    }
    return openAll(local, cfg, op);
}
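/*
 * Hedged usage sketch for openAll: push the same updates to every URI
 * configured for an assumed remote named "mirror". Passing null to
 * push is assumed here to fall back to the push specs in the remote
 * configuration; the helper name is illustrative.
 */
static void pushToAllMirrors(final Repository db)
        throws NotSupportedException, URISyntaxException, TransportException {
    for (final Transport t : Transport.openAll(db, "mirror", Transport.Operation.PUSH)) {
        try {
            t.push(new TextProgressMonitor(), null);
        } finally {
            t.close();
        }
    }
}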
private static Collection<RefSpec> expandPushWildcardsFor(
        final Repository db, final Collection<RefSpec> specs) {
    final Map<String, Ref> localRefs = db.getAllRefs();
    final Collection<RefSpec> procRefs = new HashSet<RefSpec>();
    for (final RefSpec spec : specs) {
        if (spec.isWildcard()) {
            for (final Ref localRef : localRefs.values()) {
                if (spec.matchSource(localRef))
                    procRefs.add(spec.expandFromSource(localRef));
            }
        } else {
            procRefs.add(spec);
        }
    }
    return procRefs;
}
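/*
 * To make the expansion above concrete: a wildcard spec matched
 * against a concrete local branch name yields a fully concrete spec.
 * The ref name "refs/heads/topic" and the helper are illustrative.
 */
static RefSpec demoExpansion() {
    final RefSpec wild = new RefSpec("refs/heads/*:refs/heads/*");
    // matchSource("refs/heads/topic") is true for this spec, so the
    // expansion below yields "refs/heads/topic:refs/heads/topic".
    return wild.expandFromSource("refs/heads/topic");
}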
/**
 * Convert a push remote refs update specification from {@link RefSpec} form to
 * {@link RemoteRefUpdate}. Conversion expands wildcards by matching the source
 * part against local refs. expectedOldObjectId in RemoteRefUpdate is always set
 * to null. A tracking branch is configured if the RefSpec destination matches
 * the source of any fetch ref spec for this transport's remote configuration.
 *
 * @param db local database.
 * @param specs collection of RefSpec to convert.
 * @param fetchSpecs fetch specifications used for finding local tracking refs.
 *            May be null or an empty collection.
 * @return collection of set-up {@link RemoteRefUpdate}.
 * @throws IOException when a problem occurred during conversion or specification
 *             set-up: most probably, missing objects or refs.
 */
public static Collection<RemoteRefUpdate> findRemoteRefUpdatesFor(
        final Repository db, final Collection<RefSpec> specs,
        Collection<RefSpec> fetchSpecs) throws IOException {
    if (fetchSpecs == null)
        fetchSpecs = Collections.emptyList();
    final List<RemoteRefUpdate> result = new LinkedList<RemoteRefUpdate>();
    final Collection<RefSpec> procRefs = expandPushWildcardsFor(db, specs);

    for (final RefSpec spec : procRefs) {
        String srcSpec = spec.getSource();
        final Ref srcRef = db.getRef(srcSpec);
        if (srcRef != null)
            srcSpec = srcRef.getName();

        String destSpec = spec.getDestination();
        if (destSpec == null) {
            // No destination (no colon in the ref spec), DWIMery
            // assumes the source name.
            //
            destSpec = srcSpec;
        }

        if (srcRef != null && !destSpec.startsWith(Constants.R_REFS)) {
            // Assume the same kind of ref at the destination, e.g.
            // "refs/heads/foo:master", DWIMery assumes master is also
            // under "refs/heads/".
            //
            final String n = srcRef.getName();
            final int kindEnd = n.indexOf('/', Constants.R_REFS.length());
            destSpec = n.substring(0, kindEnd + 1) + destSpec;
        }

        final boolean forceUpdate = spec.isForceUpdate();
        final String localName = findTrackingRefName(destSpec, fetchSpecs);
        final RemoteRefUpdate rru =
                new RemoteRefUpdate(db, srcSpec, destSpec, forceUpdate, localName, null);
        result.add(rru);
    }
    return result;
}
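/*
 * Usage sketch: expand a heads wildcard into one RemoteRefUpdate per
 * local branch, with no fetch specs (so no tracking refs are set up).
 * The wildcard spec and the helper name are assumptions.
 */
static Collection<RemoteRefUpdate> allBranchUpdates(final Repository db)
        throws IOException {
    final Collection<RefSpec> specs =
            Collections.singleton(new RefSpec("refs/heads/*:refs/heads/*"));
    return Transport.findRemoteRefUpdatesFor(db, specs, null);
}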
public void push(final ProgressMonitor monitor, final Map<String, RemoteRefUpdate> refUpdates)
        throws TransportException {
    markStartedOperation();
    packNames = null;
    newRefs = new TreeMap<String, Ref>(getRefsMap());
    packedRefUpdates = new ArrayList<RemoteRefUpdate>(refUpdates.size());

    // Filter the commands and issue all deletes first. This way we
    // can correctly handle a directory being cleared out and a new
    // ref using the directory name being created.
    //
    final List<RemoteRefUpdate> updates = new ArrayList<RemoteRefUpdate>();
    for (final RemoteRefUpdate u : refUpdates.values()) {
        final String n = u.getRemoteName();
        if (!n.startsWith("refs/") || !Repository.isValidRefName(n)) {
            u.setStatus(Status.REJECTED_OTHER_REASON);
            u.setMessage("funny refname");
            continue;
        }

        if (AnyObjectId.equals(ObjectId.zeroId(), u.getNewObjectId()))
            deleteCommand(u);
        else
            updates.add(u);
    }

    // If we have any updates we need to upload the objects first, to
    // prevent creating refs pointing at non-existent data. Then we
    // can update the refs, and the info-refs file for dumb transports.
    //
    if (!updates.isEmpty())
        sendpack(updates, monitor);
    for (final RemoteRefUpdate u : updates)
        updateCommand(u);

    // Is this a new repository? If so we should create additional
    // metadata files so it is properly initialized during the push.
    //
    if (!updates.isEmpty() && isNewRepository())
        createNewRepository(updates);

    RefWriter refWriter = new RefWriter(newRefs.values()) {
        @Override
        protected void writeFile(String file, byte[] content) throws IOException {
            dest.writeFile(ROOT_DIR + file, content);
        }
    };
    if (!packedRefUpdates.isEmpty()) {
        try {
            refWriter.writePackedRefs();
            for (final RemoteRefUpdate u : packedRefUpdates)
                u.setStatus(Status.OK);
        } catch (IOException err) {
            for (final RemoteRefUpdate u : packedRefUpdates) {
                u.setStatus(Status.REJECTED_OTHER_REASON);
                u.setMessage(err.getMessage());
            }
            throw new TransportException(uri, "failed updating refs", err);
        }
    }

    try {
        refWriter.writeInfoRefs();
    } catch (IOException err) {
        throw new TransportException(uri, "failed updating refs", err);
    }
}
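/*
 * For context, a sketch of how a caller might assemble the map this
 * method consumes. The branch name and helper are illustrative; per
 * the filter above, an update whose new object id equals zeroId()
 * would instead be treated as a deletion.
 */
static Map<String, RemoteRefUpdate> singleBranchUpdate(final Repository db)
        throws IOException {
    final RemoteRefUpdate rru = new RemoteRefUpdate(db, "refs/heads/master",
            "refs/heads/master", false /* force */, null /* tracking ref */,
            null /* expected old id */);
    final Map<String, RemoteRefUpdate> refUpdates =
            new HashMap<String, RemoteRefUpdate>();
    refUpdates.put(rru.getRemoteName(), rru);
    return refUpdates;
}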
/**
 * Retrieves the location of this hive.
 *
 * @return the location in the filesystem.
 */
public File getLocation() {
    return repository.getDirectory();
}
private Commit getLastCommit() throws IOException {
    return repository.mapCommit(branchName);
}
/**
 * Create a new transport instance.
 *
 * @param local the repository this instance will fetch into, or push out of.
 *            This must be the repository passed to
 *            {@link #open(Repository, URIish)}.
 * @param uri the URI used to access the remote repository. This must be the URI
 *            passed to {@link #open(Repository, URIish)}.
 */
protected Transport(final Repository local, final URIish uri) {
    final TransferConfig tc = local.getConfig().getTransfer();
    this.local = local;
    this.uri = uri;
    this.checkFetchedObjects = tc.isFsckObjects();
}
/**
 * Open a new transport instance to connect two repositories.
 *
 * @param local existing local repository.
 * @param remote location of the remote repository - may be a URI or a remote
 *            configuration name.
 * @param op planned use of the returned Transport; the URI may differ based on
 *            the type of connection desired.
 * @return the new transport instance. Never null. In case of multiple URIs in
 *         the remote configuration, only the first is chosen.
 * @throws URISyntaxException the location is not a remote defined in the
 *             configuration file and is not a well-formed URL.
 * @throws NotSupportedException the protocol specified is not supported.
 */
public static Transport open(final Repository local, final String remote, final Operation op)
        throws NotSupportedException, URISyntaxException {
    final RemoteConfig cfg = new RemoteConfig(local.getConfig(), remote);
    if (doesNotExist(cfg))
        return open(local, new URIish(remote));
    return open(local, cfg, op);
}
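/*
 * Hedged fetch sketch using this entry point. The remote name "origin"
 * and the helper name are assumptions, and passing null is assumed to
 * fall back to the fetch specs in the remote configuration.
 */
static FetchResult fetchFromOrigin(final Repository db)
        throws NotSupportedException, URISyntaxException, TransportException {
    final Transport tn = Transport.open(db, "origin", Transport.Operation.FETCH);
    try {
        return tn.fetch(new TextProgressMonitor(), null);
    } finally {
        tn.close();
    }
}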
private void setupRepoIndexRefresh() {
    refreshJob = new RIRefresh();
    Repository.addAnyRepositoryChangedListener(refreshJob);
}