public static void main(String[] args) throws IOException {
    Closer closer = Closer.create();
    // copy a file
    File origin = new File("join_temp");
    File copy = new File("target_temp");

    try {
      BufferedReader reader = closer.register(new BufferedReader(new FileReader(origin)));
      BufferedWriter writer = closer.register(new BufferedWriter(new FileWriter(copy)));

      String line;

      while ((line = reader.readLine()) != null) {
        writer.write(line);
        writer.newLine(); // readLine() strips the line terminator, so write it back
      }
    } catch (IOException e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }

    Files.copy(origin, copy);

    File moved = new File("moved");

    // moving / renaming a file
    Files.move(copy, moved);

    // working with file contents as strings
    List<String> lines = Files.readLines(origin, Charsets.UTF_8);

    HashCode hashCode = Files.hash(origin, Hashing.md5());
    System.out.println(hashCode);

    // file write and append
    String hamlet = "To be, or not to be, that is the question\n";
    File write_and_append = new File("write_and_append");

    Files.write(hamlet, write_and_append, Charsets.UTF_8);

    Files.append(hamlet, write_and_append, Charsets.UTF_8);

    //        write_and_append.deleteOnExit();

    Files.write("OverWrite the file", write_and_append, Charsets.UTF_8);

    // ByteSource ByteSink
    ByteSource fileBytes = Files.asByteSource(write_and_append);
    byte[] readBytes = fileBytes.read();
    // equivalent to the previous line: Arrays.equals(Files.toByteArray(write_and_append), readBytes)

    ByteSink fileByteSink = Files.asByteSink(write_and_append);
    fileByteSink.write(Files.toByteArray(write_and_append));

    BaseEncoding base64 = BaseEncoding.base64();
    System.out.println(base64.encode("123456".getBytes()));
  }
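
A small follow-up sketch, not part of the original example: assuming main() above has run and left "join_temp" and "moved" behind, Guava's Files.equal and Files.toString can verify the copy.

  static void verifyCopy() throws IOException {
    File origin = new File("join_temp");
    File moved = new File("moved");
    // Files.equal compares the two files byte for byte.
    System.out.println("identical: " + Files.equal(origin, moved));
    // Files.toString loads the whole file into memory; fine for small demo files.
    System.out.println(Files.toString(moved, Charsets.UTF_8));
  }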
  @Override
  public void moveFileByUpload(final File fileToMove, final Upload upload) {
    if (null == upload.getEnddir() || !upload.getEnddir().exists()) {
      return;
    }
    if (!upload.getEnddir().isDirectory()) {
      logger.warn("Enddir is not a directory!");
    }
    logger.debug("Moving file to {}", upload.getEnddir().toString());

    File endFile = null;
    final String fileName = _getFileName(fileToMove, upload.getEnddir(), upload);
    for (int i = 0; 100 > i; i++) {
      endFile = new File(_getIncrementedFileName(fileName, i));
      if (!endFile.exists()) {
        break;
      }
    }
    if (null == endFile) {
      return;
    }

    try {
      Files.move(fileToMove, endFile);
    } catch (IOException e) {
      logger.debug("Failed moving file to {}", endFile);
    }
  }
 /**
  * Merge zero or more spill files together, choosing the fastest merging strategy based on the
  * number of spills and the IO compression codec.
  *
  * @return the partition lengths in the merged file.
  */
 private long[] mergeSpills(SpillInfo[] spills) throws IOException {
   final File outputFile = shuffleBlockResolver.getDataFile(shuffleId, mapId);
   final boolean compressionEnabled = sparkConf.getBoolean("spark.shuffle.compress", true);
   final CompressionCodec compressionCodec = CompressionCodec$.MODULE$.createCodec(sparkConf);
   final boolean fastMergeEnabled =
       sparkConf.getBoolean("spark.shuffle.unsafe.fastMergeEnabled", true);
   final boolean fastMergeIsSupported =
       !compressionEnabled || compressionCodec instanceof LZFCompressionCodec;
   try {
     if (spills.length == 0) {
       new FileOutputStream(outputFile).close(); // Create an empty file
       return new long[partitioner.numPartitions()];
     } else if (spills.length == 1) {
       // Here, we don't need to perform any metrics updates because the bytes written to this
       // output file would have already been counted as shuffle bytes written.
       Files.move(spills[0].file, outputFile);
       return spills[0].partitionLengths;
     } else {
       final long[] partitionLengths;
       // There are multiple spills to merge, so none of these spill files' lengths were counted
       // towards our shuffle write count or shuffle write time. If we use the slow merge path,
       // then the final output file's size won't necessarily be equal to the sum of the spill
       // files' sizes. To guard against this case, we look at the output file's actual size when
       // computing shuffle bytes written.
       //
        // We allow the individual merge methods to report their own IO times since different merge
        // strategies use different IO techniques. We count IO during merge towards the shuffle
        // write time, which appears to be consistent with the "not bypassing merge-sort" branch in
        // ExternalSorter.
       if (fastMergeEnabled && fastMergeIsSupported) {
         // Compression is disabled or we are using an IO compression codec that supports
         // decompression of concatenated compressed streams, so we can perform a fast spill merge
         // that doesn't need to interpret the spilled bytes.
         if (transferToEnabled) {
           logger.debug("Using transferTo-based fast merge");
           partitionLengths = mergeSpillsWithTransferTo(spills, outputFile);
         } else {
           logger.debug("Using fileStream-based fast merge");
           partitionLengths = mergeSpillsWithFileStream(spills, outputFile, null);
         }
       } else {
         logger.debug("Using slow merge");
         partitionLengths = mergeSpillsWithFileStream(spills, outputFile, compressionCodec);
       }
       // When closing an UnsafeShuffleExternalSorter that has already spilled once but also has
       // in-memory records, we write out the in-memory records to a file but do not count that
       // final write as bytes spilled (instead, it's accounted as shuffle write). The merge needs
       // to be counted as shuffle write, but this will lead to double-counting of the final
       // SpillInfo's bytes.
       writeMetrics.decShuffleBytesWritten(spills[spills.length - 1].file.length());
       writeMetrics.incShuffleBytesWritten(outputFile.length());
       return partitionLengths;
     }
   } catch (IOException e) {
     if (outputFile.exists() && !outputFile.delete()) {
       logger.error("Unable to delete output file {}", outputFile.getPath());
     }
     throw e;
   }
 }
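
The javadoc above describes how the merge strategy is picked from the compression settings and the transferTo flag once there is more than one spill. A standalone sketch of just that decision, as a hypothetical helper (not part of the Spark class) that could be unit-tested in isolation:

 static String chooseMergeStrategy(
     boolean compressionEnabled,
     boolean codecSupportsConcatenation, // e.g. LZF, per the instanceof check above
     boolean fastMergeEnabled,
     boolean transferToEnabled) {
   // A fast merge only works when spilled bytes can be concatenated without being interpreted.
   boolean fastMergeIsSupported = !compressionEnabled || codecSupportsConcatenation;
   if (fastMergeEnabled && fastMergeIsSupported) {
     return transferToEnabled ? "transferTo-based fast merge" : "fileStream-based fast merge";
   }
   return "slow merge";
 }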
  /** 1. Find the file(s) to process (there may be more than one) 2. copy 3. rename 4. compress asynchronously 5. delete old files */
  @Override
  public void run() {
    String lastTime;
    if (StringUtils.equals(rollUnit, "day")) {
      lastTime = FlumeUtil.getLastDayWithDate(fileDateFormat);
    } else {
      lastTime = FlumeUtil.getLastHourWithDate(fileDateFormat);
    }

    List<String> files = FileUtil.getFiles(logDir, filePrefix, fileCompresionMode, lastTime, false);
    if (files == null || files.size() < 1) {
      LOGGER.warn("No matched logs found in {}, fileDateFormat={}", logDir, fileDateFormat);
      return;
    }
    for (String file : files) {
      String src = logDir + "/" + file;
      String copyDst = spoolDir + "/" + file + completedSuffix;
      String moveDst = spoolDir + "/" + file;

      File srcFile = new File(src);
      File copyDstFile = new File(copyDst);
      File moveDstFile = new File(moveDst);

      // copy: copy the file from logDir into spoolDir, appending completedSuffix to its name
      try {
        Files.copy(srcFile, copyDstFile);
        LOGGER.info("Copy file {} to {}.", src, copyDst);
      } catch (IOException e) {
        LOGGER.error("Connot copy file {} to {}.", src, copyDst);
      }

      // rename: once the copy is complete, rename the file to drop the completedSuffix
      try {
        Files.move(copyDstFile, moveDstFile);
        LOGGER.info("Move file {} to {}.", copyDst, moveDst);
      } catch (IOException e) {
        LOGGER.error("Connot move file {} to {}.", copyDst, moveDst);
      }

      // compress asynchronously
      if (needCompress()) {
        String innerEntryName = FileUtil.afterLastSlash(src);
        compressFuture = compressAsynchronously(src, src, innerEntryName, fileCompresionMode, 60);
      }
    }

    // delete old files
    if (needDeletePastFile()) {
      deleteFiles(logDir, filePrefix, fileCompresionMode, fileMaxHistory);
    }
  }
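
Steps 2 and 3 above (copy into the spool directory under the completedSuffix, then rename to drop it) are the usual way to hand a finished file to a spooling-directory consumer without it ever seeing a partial file. A minimal standalone sketch of the handoff, as a hypothetical helper using the same Guava calls:

  static void handOff(File src, File spoolDir, String completedSuffix) throws IOException {
    File tmp = new File(spoolDir, src.getName() + completedSuffix);
    File dst = new File(spoolDir, src.getName());
    Files.copy(src, tmp); // the slow part; the consumer is configured to ignore the suffix
    Files.move(tmp, dst); // within a single directory this is normally just a rename
  }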
Example #5
  private void insertNewMethod(
      String originalMethodName, String newMethodName, String filename, String codeToAdd)
      throws IOException {
    codeToAdd = codeToAdd.replace(originalMethodName, newMethodName); // replace(), not replaceAll(): the method name is not a regex
    String tempFileName = filename + "_temp";
    File destFile = new File(tempFileName);
    destFile.createNewFile();
    FileOutputStream fos = new FileOutputStream(destFile);
    Scanner scan = new Scanner(new File(filename));
    scan.useDelimiter("\n");
    boolean classFound = false;
    while (scan.hasNext()) {
      String str = scan.next();
      if (!classFound && str.contains(" class ")) {
        classFound = true;
        fos.write((str + "\n").getBytes(Charset.forName("UTF-8")));
        if (!str.contains("{")) {
          fos.write((scan.next() + "\n").getBytes(Charset.forName("UTF-8")));
        }
      } else if (classFound) {
        fos.write(codeToAdd.getBytes(Charset.forName("UTF-8")));
        fos.write((str + "\n").getBytes(Charset.forName("UTF-8")));
        break;
      } else {
        fos.write((str + "\n").getBytes(Charset.forName("UTF-8")));
      }
    }

    while (scan.hasNext()) {
      fos.write((scan.next() + "\n").getBytes(Charset.forName("UTF-8")));
    }
    fos.close();
    scan.close();
    File originalFile = new File(filename);
    originalFile.delete();

    // Move the temp file back into place under the original name.
    Files.move(destFile, originalFile);
  }
Example #6
  private static File makeIndexFiles(
      final List<IndexableAdapter> indexes,
      final File outDir,
      final ProgressIndicator progress,
      final List<String> mergedDimensions,
      final List<String> mergedMetrics,
      final Function<ArrayList<Iterable<Rowboat>>, Iterable<Rowboat>> rowMergerFn)
      throws IOException {
    Map<String, String> metricTypes = Maps.newTreeMap(Ordering.<String>natural().nullsFirst());
    for (IndexableAdapter adapter : indexes) {
      for (String metric : adapter.getAvailableMetrics()) {
        metricTypes.put(metric, adapter.getMetricType(metric));
      }
    }
    final Interval dataInterval;

    /** *********** Main index.drd file ************* */
    progress.progress();
    long startTime = System.currentTimeMillis();
    File indexFile = new File(outDir, "index.drd");

    FileOutputStream fileOutputStream = null;
    FileChannel channel = null;
    try {
      fileOutputStream = new FileOutputStream(indexFile);
      channel = fileOutputStream.getChannel();
      channel.write(ByteBuffer.wrap(new byte[] {IndexIO.CURRENT_VERSION_ID}));

      GenericIndexed.fromIterable(mergedDimensions, GenericIndexed.stringStrategy)
          .writeToChannel(channel);
      GenericIndexed.fromIterable(mergedMetrics, GenericIndexed.stringStrategy)
          .writeToChannel(channel);

      DateTime minTime = new DateTime(Long.MAX_VALUE);
      DateTime maxTime = new DateTime(0L);

      for (IndexableAdapter index : indexes) {
        minTime = JodaUtils.minDateTime(minTime, index.getDataInterval().getStart());
        maxTime = JodaUtils.maxDateTime(maxTime, index.getDataInterval().getEnd());
      }

      dataInterval = new Interval(minTime, maxTime);
      serializerUtils.writeString(channel, String.format("%s/%s", minTime, maxTime));
    } finally {
      Closeables.closeQuietly(channel);
      channel = null;
      Closeables.closeQuietly(fileOutputStream);
      fileOutputStream = null;
    }
    IndexIO.checkFileSize(indexFile);
    log.info(
        "outDir[%s] completed index.drd in %,d millis.",
        outDir, System.currentTimeMillis() - startTime);

    /** *********** Setup Dim Conversions ************* */
    progress.progress();
    startTime = System.currentTimeMillis();

    IOPeon ioPeon = new TmpFileIOPeon();
    ArrayList<FileOutputSupplier> dimOuts = Lists.newArrayListWithCapacity(mergedDimensions.size());
    Map<String, Integer> dimensionCardinalities = Maps.newHashMap();
    ArrayList<Map<String, IntBuffer>> dimConversions =
        Lists.newArrayListWithCapacity(indexes.size());

    for (IndexableAdapter index : indexes) {
      dimConversions.add(Maps.<String, IntBuffer>newHashMap());
    }

    for (String dimension : mergedDimensions) {
      final FlattenedArrayWriter<String> writer =
          new FlattenedArrayWriter<String>(ioPeon, dimension, GenericIndexed.stringStrategy);
      writer.open();

      List<Indexed<String>> dimValueLookups = Lists.newArrayListWithCapacity(indexes.size());
      DimValueConverter[] converters = new DimValueConverter[indexes.size()];
      for (int i = 0; i < indexes.size(); i++) {
        Indexed<String> dimValues = indexes.get(i).getDimValueLookup(dimension);
        if (dimValues != null) {
          dimValueLookups.add(dimValues);
          converters[i] = new DimValueConverter(dimValues);
        }
      }

      Iterable<String> dimensionValues =
          CombiningIterable.createSplatted(
              Iterables.transform(
                  dimValueLookups,
                  new Function<Indexed<String>, Iterable<String>>() {
                    @Override
                    public Iterable<String> apply(@Nullable Indexed<String> indexed) {
                      return Iterables.transform(
                          indexed,
                          new Function<String, String>() {
                            @Override
                            public String apply(@Nullable String input) {
                              return (input == null) ? "" : input;
                            }
                          });
                    }
                  }),
              Ordering.<String>natural().nullsFirst());

      int count = 0;
      for (String value : dimensionValues) {
        value = value == null ? "" : value;
        writer.write(value);

        for (int i = 0; i < indexes.size(); i++) {
          DimValueConverter converter = converters[i];
          if (converter != null) {
            converter.convert(value, count);
          }
        }

        ++count;
      }
      dimensionCardinalities.put(dimension, count);

      FileOutputSupplier dimOut =
          new FileOutputSupplier(IndexIO.makeDimFile(outDir, dimension), true);
      dimOuts.add(dimOut);

      writer.close();
      serializerUtils.writeString(dimOut, dimension);
      ByteStreams.copy(writer.combineStreams(), dimOut);
      for (int i = 0; i < indexes.size(); ++i) {
        DimValueConverter converter = converters[i];
        if (converter != null) {
          dimConversions.get(i).put(dimension, converters[i].getConversionBuffer());
        }
      }

      ioPeon.cleanup();
    }
    log.info(
        "outDir[%s] completed dim conversions in %,d millis.",
        outDir, System.currentTimeMillis() - startTime);

    /** *********** Walk through data sets and merge them ************ */
    progress.progress();
    startTime = System.currentTimeMillis();

    ArrayList<Iterable<Rowboat>> boats = Lists.newArrayListWithCapacity(indexes.size());

    for (int i = 0; i < indexes.size(); ++i) {
      final IndexableAdapter adapter = indexes.get(i);

      final int[] dimLookup = new int[mergedDimensions.size()];
      int count = 0;
      for (String dim : adapter.getAvailableDimensions()) {
        dimLookup[count] = mergedDimensions.indexOf(dim.toLowerCase());
        count++;
      }

      final int[] metricLookup = new int[mergedMetrics.size()];
      count = 0;
      for (String metric : adapter.getAvailableMetrics()) {
        metricLookup[count] = mergedMetrics.indexOf(metric);
        count++;
      }

      boats.add(
          new MMappedIndexRowIterable(
              Iterables.transform(
                  indexes.get(i).getRows(),
                  new Function<Rowboat, Rowboat>() {
                    @Override
                    public Rowboat apply(@Nullable Rowboat input) {
                      int[][] newDims = new int[mergedDimensions.size()][];
                      int j = 0;
                      for (int[] dim : input.getDims()) {
                        newDims[dimLookup[j]] = dim;
                        j++;
                      }

                      Object[] newMetrics = new Object[mergedMetrics.size()];
                      j = 0;
                      for (Object met : input.getMetrics()) {
                        newMetrics[metricLookup[j]] = met;
                        j++;
                      }

                      return new Rowboat(
                          input.getTimestamp(), newDims, newMetrics, input.getRowNum());
                    }
                  }),
              mergedDimensions,
              dimConversions.get(i),
              i));
    }

    Iterable<Rowboat> theRows = rowMergerFn.apply(boats);

    CompressedLongsSupplierSerializer littleEndianTimeWriter =
        CompressedLongsSupplierSerializer.create(
            ioPeon, "little_end_time", ByteOrder.LITTLE_ENDIAN);
    CompressedLongsSupplierSerializer bigEndianTimeWriter =
        CompressedLongsSupplierSerializer.create(ioPeon, "big_end_time", ByteOrder.BIG_ENDIAN);

    littleEndianTimeWriter.open();
    bigEndianTimeWriter.open();

    ArrayList<VSizeIndexedWriter> forwardDimWriters =
        Lists.newArrayListWithCapacity(mergedDimensions.size());
    for (String dimension : mergedDimensions) {
      VSizeIndexedWriter writer =
          new VSizeIndexedWriter(ioPeon, dimension, dimensionCardinalities.get(dimension));
      writer.open();
      forwardDimWriters.add(writer);
    }

    ArrayList<MetricColumnSerializer> metWriters =
        Lists.newArrayListWithCapacity(mergedMetrics.size());
    for (Map.Entry<String, String> entry : metricTypes.entrySet()) {
      String metric = entry.getKey();
      String typeName = entry.getValue();
      if ("float".equals(typeName)) {
        metWriters.add(new FloatMetricColumnSerializer(metric, outDir, ioPeon));
      } else {
        ComplexMetricSerde serde = ComplexMetrics.getSerdeForType(typeName);

        if (serde == null) {
          throw new ISE("Unknown type[%s]", typeName);
        }

        metWriters.add(new ComplexMetricColumnSerializer(metric, outDir, ioPeon, serde));
      }
    }
    for (MetricColumnSerializer metWriter : metWriters) {
      metWriter.open();
    }

    int rowCount = 0;
    long time = System.currentTimeMillis();
    List<IntBuffer> rowNumConversions = Lists.newArrayListWithCapacity(indexes.size());
    for (IndexableAdapter index : indexes) {
      int[] arr = new int[index.getNumRows()];
      Arrays.fill(arr, INVALID_ROW);
      rowNumConversions.add(IntBuffer.wrap(arr));
    }

    for (Rowboat theRow : theRows) {
      progress.progress();
      littleEndianTimeWriter.add(theRow.getTimestamp());
      bigEndianTimeWriter.add(theRow.getTimestamp());

      final Object[] metrics = theRow.getMetrics();
      for (int i = 0; i < metrics.length; ++i) {
        metWriters.get(i).serialize(metrics[i]);
      }

      int[][] dims = theRow.getDims();
      for (int i = 0; i < dims.length; ++i) {
        List<Integer> listToWrite =
            (i >= dims.length || dims[i] == null) ? null : Ints.asList(dims[i]);
        forwardDimWriters.get(i).write(listToWrite);
      }

      for (Map.Entry<Integer, TreeSet<Integer>> comprisedRow :
          theRow.getComprisedRows().entrySet()) {
        final IntBuffer conversionBuffer = rowNumConversions.get(comprisedRow.getKey());

        for (Integer rowNum : comprisedRow.getValue()) {
          while (conversionBuffer.position() < rowNum) {
            conversionBuffer.put(INVALID_ROW);
          }
          conversionBuffer.put(rowCount);
        }
      }

      if ((++rowCount % 500000) == 0) {
        log.info(
            "outDir[%s] walked 500,000/%,d rows in %,d millis.",
            outDir, rowCount, System.currentTimeMillis() - time);
        time = System.currentTimeMillis();
      }
    }

    for (IntBuffer rowNumConversion : rowNumConversions) {
      rowNumConversion.rewind();
    }

    final File littleEndianFile = IndexIO.makeTimeFile(outDir, ByteOrder.LITTLE_ENDIAN);
    littleEndianFile.delete();
    OutputSupplier<FileOutputStream> out = Files.newOutputStreamSupplier(littleEndianFile, true);
    littleEndianTimeWriter.closeAndConsolidate(out);
    IndexIO.checkFileSize(littleEndianFile);

    final File bigEndianFile = IndexIO.makeTimeFile(outDir, ByteOrder.BIG_ENDIAN);
    bigEndianFile.delete();
    out = Files.newOutputStreamSupplier(bigEndianFile, true);
    bigEndianTimeWriter.closeAndConsolidate(out);
    IndexIO.checkFileSize(bigEndianFile);

    for (int i = 0; i < mergedDimensions.size(); ++i) {
      forwardDimWriters.get(i).close();
      ByteStreams.copy(forwardDimWriters.get(i).combineStreams(), dimOuts.get(i));
    }

    for (MetricColumnSerializer metWriter : metWriters) {
      metWriter.close();
    }

    ioPeon.cleanup();
    log.info(
        "outDir[%s] completed walk through of %,d rows in %,d millis.",
        outDir, rowCount, System.currentTimeMillis() - startTime);

    /** ********** Create Inverted Indexes ************ */
    startTime = System.currentTimeMillis();

    final File invertedFile = new File(outDir, "inverted.drd");
    Files.touch(invertedFile);
    out = Files.newOutputStreamSupplier(invertedFile, true);
    for (int i = 0; i < mergedDimensions.size(); ++i) {
      long dimStartTime = System.currentTimeMillis();
      String dimension = mergedDimensions.get(i);

      File dimOutFile = dimOuts.get(i).getFile();
      final MappedByteBuffer dimValsMapped = Files.map(dimOutFile);

      if (!dimension.equals(serializerUtils.readString(dimValsMapped))) {
        throw new ISE("dimensions[%s] didn't equate!?  This is a major WTF moment.", dimension);
      }
      Indexed<String> dimVals =
          GenericIndexed.readFromByteBuffer(dimValsMapped, GenericIndexed.stringStrategy);
      log.info("Starting dimension[%s] with cardinality[%,d]", dimension, dimVals.size());

      FlattenedArrayWriter<ImmutableConciseSet> writer =
          new FlattenedArrayWriter<ImmutableConciseSet>(
              ioPeon, dimension, ConciseCompressedIndexedInts.objectStrategy);
      writer.open();

      for (String dimVal : IndexedIterable.create(dimVals)) {
        progress.progress();
        List<Iterable<Integer>> convertedInverteds = Lists.newArrayListWithCapacity(indexes.size());
        for (int j = 0; j < indexes.size(); ++j) {
          convertedInverteds.add(
              new ConvertingIndexedInts(
                  indexes.get(j).getInverteds(dimension, dimVal), rowNumConversions.get(j)));
        }

        ConciseSet bitset = new ConciseSet();
        for (Integer row :
            CombiningIterable.createSplatted(
                convertedInverteds, Ordering.<Integer>natural().nullsFirst())) {
          if (row != INVALID_ROW) {
            bitset.add(row);
          }
        }

        writer.write(ImmutableConciseSet.newImmutableFromMutable(bitset));
      }
      writer.close();

      serializerUtils.writeString(out, dimension);
      ByteStreams.copy(writer.combineStreams(), out);
      ioPeon.cleanup();

      log.info(
          "Completed dimension[%s] in %,d millis.",
          dimension, System.currentTimeMillis() - dimStartTime);
    }
    log.info(
        "outDir[%s] completed inverted.drd in %,d millis.",
        outDir, System.currentTimeMillis() - startTime);

    final ArrayList<String> expectedFiles =
        Lists.newArrayList(
            Iterables.concat(
                Arrays.asList(
                    "index.drd", "inverted.drd", "time_BIG_ENDIAN.drd", "time_LITTLE_ENDIAN.drd"),
                Iterables.transform(mergedDimensions, GuavaUtils.formatFunction("dim_%s.drd")),
                Iterables.transform(
                    mergedMetrics, GuavaUtils.formatFunction("met_%s_LITTLE_ENDIAN.drd")),
                Iterables.transform(
                    mergedMetrics, GuavaUtils.formatFunction("met_%s_BIG_ENDIAN.drd"))));

    Map<String, File> files = Maps.newLinkedHashMap();
    for (String fileName : expectedFiles) {
      files.put(fileName, new File(outDir, fileName));
    }

    File smooshDir = new File(outDir, "smoosher");
    smooshDir.mkdir();

    for (Map.Entry<String, File> entry : Smoosh.smoosh(outDir, smooshDir, files).entrySet()) {
      entry.getValue().delete();
    }

    for (File file : smooshDir.listFiles()) {
      Files.move(file, new File(outDir, file.getName()));
    }

    if (!smooshDir.delete()) {
      log.info(
          "Unable to delete temporary dir[%s], contains[%s]",
          smooshDir, Arrays.asList(smooshDir.listFiles()));
      throw new IOException(String.format("Unable to delete temporary dir[%s]", smooshDir));
    }

    createIndexDrdFile(
        IndexIO.CURRENT_VERSION_ID,
        outDir,
        GenericIndexed.fromIterable(mergedDimensions, GenericIndexed.stringStrategy),
        GenericIndexed.fromIterable(mergedMetrics, GenericIndexed.stringStrategy),
        dataInterval);

    return outDir;
  }
  @Override
  public Deployment install(Installation installation) {
    Preconditions.checkNotNull(installation, "installation is null");

    File deploymentDir = new File(baseDir, "installation");

    Assignment assignment = installation.getAssignment();

    Deployment newDeployment =
        new Deployment(
            slotId, location, deploymentDir, getDataDir(), assignment, installation.getResources());
    File tempDir = createTempDir(baseDir, "tmp-install");
    try {
      // download the binary
      File binary = new File(tempDir, "airship-binary.tar.gz");
      try {
        Files.copy(Resources.newInputStreamSupplier(installation.getBinaryFile().toURL()), binary);
      } catch (IOException e) {
        throw new RuntimeException(
            "Unable to download binary "
                + assignment.getBinary()
                + " from "
                + installation.getBinaryFile(),
            e);
      }

      // unpack the binary into a temp unpack dir
      File unpackDir = new File(tempDir, "unpack");
      unpackDir.mkdirs();
      try {
        extractTar(binary, unpackDir, tarTimeout);
      } catch (CommandFailedException e) {
        throw new RuntimeException(
            "Unable to extract tar file " + assignment.getBinary() + ": " + e.getMessage(), e);
      }

      // find the archive root dir (it should be the only file in the temp unpack dir)
      List<File> files = listFiles(unpackDir);
      if (files.size() != 1) {
        throw new RuntimeException(
            "Invalid tar file: file does not have a root directory " + assignment.getBinary());
      }
      File binaryRootDir = files.get(0);

      // unpack config bundle
      try {
        URL url = installation.getConfigFile().toURL();
        ConfigUtils.unpackConfig(Resources.newInputStreamSupplier(url), binaryRootDir);
      } catch (Exception e) {
        throw new RuntimeException(
            "Unable to extract config bundle " + assignment.getConfig() + ": " + e.getMessage(), e);
      }

      // installation is good, clear the current deployment
      if (this.deployment != null) {
        this.deploymentFile.delete();
        deleteRecursively(this.deployment.getDeploymentDir());
        this.deployment = null;
      }

      // save deployment versions file
      try {
        save(newDeployment);
      } catch (IOException e) {
        throw new RuntimeException("Unable to save deployment file", e);
      }

      // move the binary root directory to the final target
      try {
        Files.move(binaryRootDir, deploymentDir);
      } catch (IOException e) {
        throw new RuntimeException("Unable to move deployment to final location", e);
      }
    } finally {
      if (!deleteRecursively(tempDir)) {
        log.warn("Unable to delete temp directory: %s", tempDir.getAbsolutePath());
      }
    }

    this.deployment = newDeployment;
    return newDeployment;
  }
Example #8
 @Override
 public boolean run(TestRun ctx) {
   String debugmsg = StringUtils.debugmsg(false);
   try {
     String tf = ctx.string("file");
     ctx.log().info(debugmsg + "File parameter: " + tf);
     File target = new File(tf);
     if (!target.isAbsolute()) {
       String projectDir = null;
       try {
         projectDir = WDTestRun.getVariableValue(STAFHelper.SAFS_VAR_PROJECTDIRECTORY);
         if (projectDir != null && projectDir.length() > 0) {
           if (!projectDir.endsWith("/") && !projectDir.endsWith("\\")) {
             projectDir += File.separator;
           }
           if (tf.startsWith("/") || tf.startsWith("\\")) {
             tf = tf.substring(1);
           }
           tf = projectDir + tf;
           target = new File(tf);
           if (!target.isAbsolute()) {
             throw new IllegalArgumentException(
                 "File parameter does not resolve to an absolute filepath.");
           }
         } else {
           throw new IllegalArgumentException(
               "Valid ProjectRoot not available and file parameter does not resolve to an absolute filepath.");
         }
       } catch (Exception x) {
         ctx.log().error(x.getClass().getSimpleName() + ", " + x.getMessage());
         ctx.log()
             .error(
                 debugmsg
                     + "Filepath parameter must be absolute or relative to the Project: "
                     + target.getPath());
         return false;
       }
     }
     if (target.isFile()) {
       target.delete();
       if (target.isFile()) {
         ctx.log()
             .error(
                 debugmsg + "File exists and could not be deleted: " + target.getAbsolutePath());
         return false;
       }
     }
     try {
       Files.createParentDirs(target);
     } catch (IOException io) {
       ctx.log().debug(debugmsg + io.getMessage() + ", attempted Files.createParentDirs...");
       throw io;
     }
     Files.move(ctx.driver().getScreenshotAs(OutputType.FILE), target);
     return target.isFile();
   } catch (NullPointerException np) {
     ctx.log()
         .error(
             debugmsg
                 + "NullPointerException "
                 + np.getMessage()
                 + ", probably caused by missing FILEPATH parameter.");
   } catch (IOException io) {
     ctx.log().error(debugmsg + io.getClass().getSimpleName() + ", " + io.getMessage());
   }
   return false;
 }
  /**
   * Handles an incoming PUT request from a BLOB client.
   *
   * @param inputStream The input stream to read incoming data from.
   * @param outputStream The output stream to send data back to the client.
   * @param buf An auxiliary buffer for data serialization/deserialization.
   */
  private void put(InputStream inputStream, OutputStream outputStream, byte[] buf)
      throws IOException {
    JobID jobID = null;
    String key = null;
    MessageDigest md = null;

    File incomingFile = null;
    FileOutputStream fos = null;

    try {
      final int contentAddressable = inputStream.read();
      if (contentAddressable < 0) {
        throw new EOFException("Premature end of PUT request");
      }

      if (contentAddressable == NAME_ADDRESSABLE) {
        // Receive the job ID and key
        byte[] jidBytes = new byte[JobID.SIZE];
        readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
        jobID = JobID.fromByteArray(jidBytes);
        key = readKey(buf, inputStream);
      } else if (contentAddressable == CONTENT_ADDRESSABLE) {
        md = BlobUtils.createMessageDigest();
      } else {
        throw new IOException("Unknown type of BLOB addressing.");
      }

      if (LOG.isDebugEnabled()) {
        if (contentAddressable == NAME_ADDRESSABLE) {
          LOG.debug(String.format("Received PUT request for BLOB under %s / \"%s\"", jobID, key));
        } else {
          LOG.debug("Received PUT request for content addressable BLOB");
        }
      }

      incomingFile = blobServer.createTemporaryFilename();
      fos = new FileOutputStream(incomingFile);

      while (true) {
        final int bytesExpected = readLength(inputStream);
        if (bytesExpected == -1) {
          // done
          break;
        }
        if (bytesExpected > BUFFER_SIZE) {
          throw new IOException("Unexpected number of incoming bytes: " + bytesExpected);
        }

        readFully(inputStream, buf, 0, bytesExpected, "buffer");
        fos.write(buf, 0, bytesExpected);

        if (md != null) {
          md.update(buf, 0, bytesExpected);
        }
      }
      fos.close();

      if (contentAddressable == NAME_ADDRESSABLE) {
        File storageFile = this.blobServer.getStorageLocation(jobID, key);
        Files.move(incomingFile, storageFile);
        incomingFile = null;
        outputStream.write(RETURN_OKAY);
      } else {
        BlobKey blobKey = new BlobKey(md.digest());
        File storageFile = blobServer.getStorageLocation(blobKey);
        Files.move(incomingFile, storageFile);
        incomingFile = null;

        // Return computed key to client for validation
        outputStream.write(RETURN_OKAY);
        blobKey.writeToOutputStream(outputStream);
      }
    } catch (SocketException e) {
      // happens when the other side disconnects
      LOG.debug("Socket connection closed", e);
    } catch (Throwable t) {
      LOG.error("PUT operation failed", t);
      try {
        writeErrorToStream(outputStream, t);
      } catch (IOException e) {
        // we are already handling an exception, so failing to report it back to the client
        // matters little; ignore this
      }
      clientSocket.close();
    } finally {
      if (fos != null) {
        try {
          fos.close();
        } catch (Throwable t) {
          LOG.warn("Cannot close stream to BLOB staging file", t);
        }
      }
      if (incomingFile != null) {
        if (!incomingFile.delete()) {
          LOG.warn("Cannot delete BLOB server staging file " + incomingFile.getAbsolutePath());
        }
      }
    }
  }
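
The handling above streams the upload into a temporary file and only then calls Files.move to put it at its final storage location, so readers never observe a half-written BLOB. A stripped-down sketch of that pattern, as a hypothetical helper rather than actual Flink API:

  static void receiveToStorage(InputStream in, File tempFile, File storageFile) throws IOException {
    try (FileOutputStream fos = new FileOutputStream(tempFile)) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        fos.write(buf, 0, n);
      }
    }
    Files.move(tempFile, storageFile); // publish only once the file is complete
  }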
Example #10
File: FileUtils.java  Project: rhli/tachyon
 /**
  * Moves a file from one place to another; it works even across storage devices (e.g., from
  * memory to SSD), where {@link File#renameTo} may not work.
  *
  * <p>Current implementation uses {@link com.google.common.io.Files#move(File, File)}, may change
  * if there is a better solution.
  *
  * @param srcPath pathname string of source file
  * @param dstPath pathname string of destination file
  * @throws IOException when fails to move
  */
 public static void move(String srcPath, String dstPath) throws IOException {
   Files.move(new File(srcPath), new File(dstPath));
 }
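
A hypothetical usage of the helper above (paths are illustrative only): relocating a block file from a memory-backed tier to an SSD-backed tier, where a plain File#renameTo could fail because the move crosses devices.

 public static void demoTierMove() throws IOException {
   FileUtils.move("/mnt/ramdisk/blocks/12345", "/mnt/ssd/blocks/12345");
 }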