Example #1
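    /**
     * Pushes each file in {@code filesToInstallByHash} to the device over a single adb port
     * forward, using {@code filenameFormat} and the file's hash to build the destination path
     * under {@code destinationDirRelativeToDataRoot}, then writes {@code metadataFileContents}
     * to {@code metadata.txt} in the same directory. The forward is removed in a {@code finally}
     * block; a rejected removal is only logged and reported as a console warning, because the
     * forward is recreated on the next exopackage installation. Each phase is wrapped in a
     * {@link TraceEventLogger} scope for tracing.
     */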
    private void installFiles(
        String filesType,
        ImmutableMap<String, Path> filesToInstallByHash,
        String metadataFileContents,
        String filenameFormat,
        Path destinationDirRelativeToDataRoot)
        throws Exception {
      try (TraceEventLogger ignored1 =
          TraceEventLogger.start(eventBus, "multi_install_" + filesType)) {
        device.createForward(agentPort, agentPort);
        try {
          for (Map.Entry<String, Path> entry : filesToInstallByHash.entrySet()) {
            Path destination =
                destinationDirRelativeToDataRoot.resolve(
                    String.format(filenameFormat, entry.getKey()));
            Path source = entry.getValue();

            try (TraceEventLogger ignored2 =
                TraceEventLogger.start(eventBus, "install_" + filesType)) {
              installFile(device, agentPort, destination, source);
            }
          }
          try (TraceEventLogger ignored3 =
              TraceEventLogger.start(eventBus, "install_" + filesType + "_metadata")) {
            try (NamedTemporaryFile temp = new NamedTemporaryFile("metadata", "tmp")) {
              com.google.common.io.Files.write(
                  metadataFileContents.getBytes(Charsets.UTF_8), temp.get().toFile());
              installFile(
                  device,
                  agentPort,
                  destinationDirRelativeToDataRoot.resolve("metadata.txt"),
                  temp.get());
            }
          }
        } finally {
          try {
            device.removeForward(agentPort, agentPort);
          } catch (AdbCommandRejectedException e) {
            LOG.warn(e, "Failed to remove adb forward on port %d for device %s", agentPort, device);
            eventBus.post(
                ConsoleEvent.warning(
                    "Failed to remove adb forward %d. This is not necessarily a problem\n"
                        + "because it will be recreated during the next exopackage installation.\n"
                        + "See the log for the full exception.",
                    agentPort));
          }
        }
      }
    }
Example #2
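  /**
   * Builds the step list for a gen_aidl rule: clean the gen and scratch directories, run {@link
   * AidlStep} over the .aidl file, make sure the destination gen directory exists, and package the
   * generated sources into the output jar with {@link JarDirectoryStep}. A console warning is
   * posted when the import path is not a child of the rule's base path, and the output jar is
   * recorded as a build artifact.
   */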
  @Override
  public ImmutableList<Step> getBuildSteps(
      BuildContext context, BuildableContext buildableContext) {

    ImmutableList.Builder<Step> commands = ImmutableList.builder();

    commands.add(new MakeCleanDirectoryStep(getProjectFilesystem(), genPath));

    BuildTarget target = getBuildTarget();
    Path outputDirectory = BuildTargets.getScratchPath(target, "__%s.aidl");
    commands.add(new MakeCleanDirectoryStep(getProjectFilesystem(), outputDirectory));

    AidlStep command =
        new AidlStep(
            getProjectFilesystem(),
            target,
            getResolver().getAbsolutePath(aidlFilePath),
            ImmutableSet.of(importPath),
            outputDirectory);
    commands.add(command);

    // Files must ultimately be written to GEN_DIR to be used as source paths.
    Path genDirectory = Paths.get(BuckConstant.GEN_DIR, importPath);

    // Warn the user if the import path is not under the rule's base path.
    if (!importPath.startsWith(target.getBasePath().toString())) {
      // TODO(shs96c): Make this fatal. Give people some time to clean up their rules.
      context
          .getEventBus()
          .post(
              ConsoleEvent.warning(
                  "%s, gen_aidl import path (%s) should be a child of %s",
                  target, importPath, target.getBasePath()));
    }

    commands.add(new MkdirStep(getProjectFilesystem(), genDirectory));

    commands.add(
        new JarDirectoryStep(
            getProjectFilesystem(),
            output,
            ImmutableSortedSet.of(outputDirectory),
            /* main class */ null,
            /* manifest */ null));
    buildableContext.recordArtifact(output);

    return commands.build();
  }
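JarDirectoryStep above is one of Buck's own step classes; judging from its arguments (no main class, no manifest), it packages the contents of the scratch directory into the output jar. As a hedged illustration of that final step, not the step's actual implementation, a plain-JDK sketch of jarring a directory could look like this:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.stream.Stream;

final class JarDirectorySketch {
  /** Writes every regular file under sourceDir into outputJar, using paths relative to sourceDir. */
  static void jarDirectory(Path sourceDir, Path outputJar) throws IOException {
    try (OutputStream out = Files.newOutputStream(outputJar);
        JarOutputStream jar = new JarOutputStream(out);
        Stream<Path> paths = Files.walk(sourceDir)) {
      Iterable<Path> files = paths.filter(Files::isRegularFile)::iterator;
      for (Path file : files) {
        // Jar entry names use forward slashes and are relative to the directory being jarred.
        String entryName = sourceDir.relativize(file).toString().replace('\\', '/');
        jar.putNextEntry(new JarEntry(entryName));
        Files.copy(file, jar);
        jar.closeEntry();
      }
    }
  }
}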
Example #3
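  /**
   * Fetches the zipped artifact for {@code ruleKey} into a temporary file and unpacks it on top of
   * the project filesystem. On a cache hit the accompanying metadata is written into the rule's
   * metadata directory. The temporary zip is deleted only after a successful unzip; if unzipping
   * fails, the failure is reported as a console warning and {@link CacheResult#miss()} is returned
   * so the rule is built locally instead.
   */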
  private CacheResult tryToFetchArtifactFromBuildCacheAndOverlayOnTopOfProjectFilesystem(
      BuildRule rule,
      RuleKey ruleKey,
      BuildInfoRecorder buildInfoRecorder,
      ArtifactCache artifactCache,
      ProjectFilesystem filesystem,
      BuildContext buildContext)
      throws InterruptedException {

    // Create a temp file whose extension must be ".zip" for Filesystems.newFileSystem() to infer
    // that we are creating a zip-based FileSystem.
    Path zipFile;
    try {
      zipFile =
          Files.createTempFile(
              "buck_artifact_" + MoreFiles.sanitize(rule.getBuildTarget().getShortName()), ".zip");
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

    // TODO(mbolin): Change ArtifactCache.fetch() so that it returns a File instead of takes one.
    // Then we could download directly from the remote cache into the on-disk cache and unzip it
    // from there.
    CacheResult cacheResult =
        buildInfoRecorder.fetchArtifactForBuildable(ruleKey, zipFile, artifactCache);
    if (!cacheResult.getType().isSuccess()) {
      try {
        Files.delete(zipFile);
      } catch (IOException e) {
        LOG.warn(e, "failed to delete %s", zipFile);
      }
      return cacheResult;
    }

    // We unzip the file in the root of the project directory.
    // Ideally, the following would work:
    //
    // Path pathToZip = Paths.get(zipFile.getAbsolutePath());
    // FileSystem fs = FileSystems.newFileSystem(pathToZip, /* loader */ null);
    // Path root = Iterables.getOnlyElement(fs.getRootDirectories());
    // MoreFiles.copyRecursively(root, projectRoot);
    //
    // Unfortunately, this does not appear to work, in practice, because MoreFiles fails when trying
    // to resolve a Path for a zip entry against a file Path on disk.
    ArtifactCacheEvent.Started started =
        ArtifactCacheEvent.started(
            ArtifactCacheEvent.Operation.DECOMPRESS, ImmutableSet.of(ruleKey));
    buildContext.getEventBus().post(started);
    try {
      Unzip.extractZipFile(
          zipFile.toAbsolutePath(),
          filesystem,
          Unzip.ExistingFileMode.OVERWRITE_AND_CLEAN_DIRECTORIES);

      // We only delete the ZIP file when it has been unzipped successfully. Otherwise, we leave it
      // around for debugging purposes.
      Files.delete(zipFile);

      if (cacheResult.getType() == CacheResult.Type.HIT) {

        // If we have a hit, also write out the build metadata.
        Path metadataDir = BuildInfo.getPathToMetadataDirectory(rule.getBuildTarget());
        for (Map.Entry<String, String> ent : cacheResult.getMetadata().entrySet()) {
          Path dest = metadataDir.resolve(ent.getKey());
          filesystem.createParentDirs(dest);
          filesystem.writeContentsToPath(ent.getValue(), dest);
        }
      }

    } catch (IOException e) {
      // In the wild, we have seen some inexplicable failures during this step. For now, we try to
      // give the user as much information as we can to debug the issue, but return CacheResult.MISS
      // so that Buck will fall back on doing a local build.
      buildContext
          .getEventBus()
          .post(
              ConsoleEvent.warning(
                  "Failed to unzip the artifact for %s at %s.\n"
                      + "The rule will be built locally, "
                      + "but here is the stacktrace of the failed unzip call:\n"
                      + rule.getBuildTarget(),
                  zipFile.toAbsolutePath(),
                  Throwables.getStackTraceAsString(e)));
      return CacheResult.miss();
    } finally {
      buildContext.getEventBus().post(ArtifactCacheEvent.finished(started));
    }

    return cacheResult;
  }
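Unzip.extractZipFile above is Buck's internal helper. As a rough sketch of what that extraction step amounts to, and not the helper's real implementation (in particular it ignores the ExistingFileMode argument beyond overwriting files), the same unpacking can be expressed with java.util.zip alone:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

final class SimpleUnzip {
  /** Unpacks zipFile into destRoot, overwriting any existing files. */
  static void extract(Path zipFile, Path destRoot) throws IOException {
    Path root = destRoot.toAbsolutePath().normalize();
    try (InputStream in = Files.newInputStream(zipFile);
        ZipInputStream zip = new ZipInputStream(in)) {
      for (ZipEntry entry = zip.getNextEntry(); entry != null; entry = zip.getNextEntry()) {
        Path target = root.resolve(entry.getName()).normalize();
        // Refuse entries that would escape the destination root ("zip slip").
        if (!target.startsWith(root)) {
          throw new IOException("Bad zip entry: " + entry.getName());
        }
        if (entry.isDirectory()) {
          Files.createDirectories(target);
        } else {
          Files.createDirectories(target.getParent());
          Files.copy(zip, target, StandardCopyOption.REPLACE_EXISTING);
        }
      }
    }
  }
}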
Example #4
  /**
   * Query Watchman for file change events. If too many events are pending or an error occurs an
   * overflow event is posted to the EventBus signalling that events may have been lost (and so
   * typically caches must be cleared to avoid inconsistency). Interruptions and IOExceptions are
   * propagated to callers, but typically if overflow events are handled conservatively by
   * subscribers then no other remedial action is required.
   */
  @Override
  public void postEvents(BuckEventBus buckEventBus) throws IOException, InterruptedException {
    ProcessExecutor.LaunchedProcess watchmanProcess =
        processExecutor.launchProcess(
            ProcessExecutorParams.builder()
                .addCommand("watchman", "--server-encoding=json", "--no-pretty", "-j")
                .build());
    try {
      LOG.debug("Writing query to Watchman: %s", query);
      watchmanProcess.getOutputStream().write(query.getBytes(Charsets.US_ASCII));
      watchmanProcess.getOutputStream().close();
      LOG.debug("Parsing JSON output from Watchman");
      final long parseStartTimeMillis = clock.currentTimeMillis();
      InputStream jsonInput = watchmanProcess.getInputStream();
      if (LOG.isVerboseEnabled()) {
        byte[] fullResponse = ByteStreams.toByteArray(jsonInput);
        jsonInput.close();
        jsonInput = new ByteArrayInputStream(fullResponse);
        LOG.verbose("Full JSON: " + new String(fullResponse, Charsets.UTF_8).trim());
      }
      JsonParser jsonParser = objectMapper.getJsonFactory().createJsonParser(jsonInput);
      PathEventBuilder builder = new PathEventBuilder();
      JsonToken token = jsonParser.nextToken();
      /*
       * Watchman returns changes as an array of JSON objects with potentially unstable key
       * ordering:
       * {
       *     "files": [
       *     {
       *         "new": false,
       *         "exists": true,
       *         "name": "bin/buckd",
       *     },
       *     ]
       * }
       * A simple way to parse these changes is to collect the relevant values from each object
       * in a builder and then build an event when the end of a JSON object is reached. When the end
       * of the enclosing JSON object is processed the builder will not contain a complete event, so
       * the object end token will be ignored.
       */
      int eventCount = 0;
      while (token != null) {
        boolean shouldOverflow = false;
        if (eventCount > overflow) {
          LOG.warn(
              "Received too many events from Watchmen (%d > overflow max %d), posting overflow "
                  + "event and giving up.",
              eventCount, overflow);
          shouldOverflow = true;
        } else {
          long elapsedMillis = clock.currentTimeMillis() - parseStartTimeMillis;
          if (elapsedMillis >= timeoutMillis) {
            LOG.warn(
                "Parsing took too long (timeout %d ms), posting overflow event and giving up.",
                timeoutMillis);
            shouldOverflow = true;
          }
        }

        if (shouldOverflow) {
          postWatchEvent(createOverflowEvent());
          processExecutor.destroyLaunchedProcess(watchmanProcess);
          processExecutor.waitForLaunchedProcess(watchmanProcess);
          return;
        }

        switch (token) {
          case FIELD_NAME:
            String fieldName = jsonParser.getCurrentName();
            switch (fieldName) {
              case "is_fresh_instance":
                // Force caches to be invalidated --- we have no idea what's happening.
                Boolean newInstance = jsonParser.nextBooleanValue();
                if (newInstance) {
                  LOG.info(
                      "Fresh watchman instance detected. "
                          + "Posting overflow event to flush caches.");
                  postWatchEvent(createOverflowEvent());
                }
                break;

              case "name":
                builder.setPath(Paths.get(jsonParser.nextTextValue()));
                break;
              case "new":
                if (jsonParser.nextBooleanValue()) {
                  builder.setCreationEvent();
                }
                break;
              case "exists":
                if (!jsonParser.nextBooleanValue()) {
                  builder.setDeletionEvent();
                }
                break;
              case "error":
                WatchmanWatcherException e =
                    new WatchmanWatcherException(jsonParser.nextTextValue());
                LOG.error(
                    e, "Error in Watchman output. Posting an overflow event to flush the caches");
                postWatchEvent(createOverflowEvent());
                throw e;
              case "warning":
                String message = jsonParser.nextTextValue();
                buckEventBus.post(
                    ConsoleEvent.warning("Watchman has produced a warning: %s", message));
                LOG.warn("Watchman has produced a warning: %s", message);
                break;
            }
            break;
          case END_OBJECT:
            if (builder.canBuild()) {
              postWatchEvent(builder.build());
              ++eventCount;
            }
            builder = new PathEventBuilder();
            break;
            // $CASES-OMITTED$
          default:
            break;
        }
        token = jsonParser.nextToken();
      }
      int watchmanExitCode;
      LOG.debug("Posted %d Watchman events. Waiting for subprocess to exit...", eventCount);
      watchmanExitCode = processExecutor.waitForLaunchedProcess(watchmanProcess);
      if (watchmanExitCode != 0) {
        LOG.error("Watchman exited with error code %d", watchmanExitCode);
        postWatchEvent(createOverflowEvent()); // Events may have been lost, signal overflow.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        ByteStreams.copy(watchmanProcess.getErrorStream(), buffer);
        throw new WatchmanWatcherException(
            "Watchman failed with exit code " + watchmanExitCode + ": " + buffer.toString());
      } else {
        LOG.debug("Watchman exited cleanly.");
      }
    } catch (InterruptedException e) {
      LOG.warn(e, "Killing Watchman process on interrupted exception");
      postWatchEvent(createOverflowEvent()); // Events may have been lost, signal overflow.
      processExecutor.destroyLaunchedProcess(watchmanProcess);
      processExecutor.waitForLaunchedProcess(watchmanProcess);
      Thread.currentThread().interrupt();
      throw e;
    } catch (IOException e) {
      LOG.error(e, "Killing Watchman process on I/O exception");
      postWatchEvent(createOverflowEvent()); // Events may have been lost, signal overflow.
      processExecutor.destroyLaunchedProcess(watchmanProcess);
      processExecutor.waitForLaunchedProcess(watchmanProcess);
      throw e;
    }
  }
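The block comment inside postEvents describes the shape of Watchman's response: a "files" array of objects whose keys ("name", "new", "exists", ...) can appear in any order, which is why the method accumulates fields in a builder and emits an event at each END_OBJECT. Below is a minimal, self-contained sketch of that token-driven pattern over a hard-coded response, using the current com.fasterxml.jackson.core streaming API instead of the deprecated getJsonFactory()/createJsonParser calls above; it illustrates the parsing idea only and is not a drop-in replacement for the method.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public final class WatchmanResponseSketch {
  public static void main(String[] args) throws Exception {
    // Hard-coded response in the shape described by the comment in postEvents above.
    String json = "{\"files\": [{\"new\": false, \"exists\": true, \"name\": \"bin/buckd\"}]}";

    try (JsonParser parser = new JsonFactory().createParser(json)) {
      String name = null;
      boolean created = false;
      boolean deleted = false;
      for (JsonToken token = parser.nextToken(); token != null; token = parser.nextToken()) {
        if (token == JsonToken.FIELD_NAME) {
          switch (parser.getCurrentName()) {
            case "name":
              name = parser.nextTextValue();
              break;
            case "new":
              created = parser.nextBooleanValue();
              break;
            case "exists":
              deleted = !parser.nextBooleanValue();
              break;
            default:
              break;
          }
        } else if (token == JsonToken.END_OBJECT && name != null) {
          // A complete file object has been read: emit one "event" and reset the builder state.
          System.out.printf("change: %s created=%b deleted=%b%n", name, created, deleted);
          name = null;
          created = false;
          deleted = false;
        }
      }
    }
  }
}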