private void writeIndex() {
      Output indexOutput;
      try {
        indexOutput = new Output(new FileOutputStream(getIndexFile()));
      } catch (FileNotFoundException e) {
        throw new UncheckedIOException(e);
      }

      try {
        indexOutput.writeInt(index.size(), true);

        for (Map.Entry<Long, Map<Long, TestCaseRegion>> classEntry : index.entrySet()) {
          Long classId = classEntry.getKey();
          Map<Long, TestCaseRegion> regions = classEntry.getValue();

          indexOutput.writeLong(classId, true);
          indexOutput.writeInt(regions.size(), true);

          for (Map.Entry<Long, TestCaseRegion> testCaseEntry : regions.entrySet()) {
            long id = testCaseEntry.getKey();
            TestCaseRegion region = testCaseEntry.getValue();
            indexOutput.writeLong(id, true);
            indexOutput.writeLong(region.stdOutRegion.start);
            indexOutput.writeLong(region.stdOutRegion.stop);
            indexOutput.writeLong(region.stdErrRegion.start);
            indexOutput.writeLong(region.stdErrRegion.stop);
          }
        }
      } finally {
        indexOutput.close();
      }
    }
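A matching reader for this index format would mirror the variable-length encodings used above: writeInt(n, true) and writeLong(n, true) pair with readInt(true) and readLong(true). The sketch below is illustrative only; readIndex() is a hypothetical counterpart, and it assumes TestCaseRegion has a no-arg constructor with mutable stdOutRegion/stdErrRegion fields (the write side only shows that those fields exist).

    private Map<Long, Map<Long, TestCaseRegion>> readIndex() {
      Map<Long, Map<Long, TestCaseRegion>> result = new HashMap<>();
      try (Input input = new Input(new FileInputStream(getIndexFile()))) {
        int classCount = input.readInt(true);            // matches writeInt(index.size(), true)
        for (int c = 0; c < classCount; c++) {
          long classId = input.readLong(true);           // matches writeLong(classId, true)
          int regionCount = input.readInt(true);
          Map<Long, TestCaseRegion> regions = new HashMap<>();
          for (int r = 0; r < regionCount; r++) {
            long testCaseId = input.readLong(true);
            // Assumes TestCaseRegion exposes mutable regions; adapt to its real API.
            TestCaseRegion region = new TestCaseRegion();
            region.stdOutRegion.start = input.readLong();
            region.stdOutRegion.stop = input.readLong();
            region.stdErrRegion.start = input.readLong();
            region.stdErrRegion.stop = input.readLong();
            regions.put(testCaseId, region);
          }
          result.put(classId, regions);
        }
      } catch (FileNotFoundException e) {
        throw new UncheckedIOException(e);
      }
      return result;
    }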
 private void writeTicksAndEvents(Output output) throws KryoException {
   int spritesNum = 100;
   String currEdge = "1107293662 1107288392";
   String nextEdge = "";
   Random rand = new Random(System.currentTimeMillis());
   for (int edges = 0; edges < 1000; edges++) {
     Set<String> outgoing = graphData.getEdgesOf(currEdge.split(" ")[1]);
     Object[] setArray = outgoing.toArray();
     nextEdge = (String) setArray[rand.nextInt(outgoing.size())];
     for (int i = 0; i < 10; i = i + 5) {
       eventWriter.writeTick(output, i);
       int percentage = (int) (((double) i / 10) * 100);
       int nextPer = (int) (((double) (i + 5) / 10) * 100);
       for (int j = 0; j < spritesNum; j++) {
         if (Math.random() > 0) {
           // Math.random() > 0 is effectively always true, so every sprite emits a move event
           String[] split = currEdge.split(" ");
           eventWriter.writeEvent(
               output, new MoveEvent(j, split[0], split[1], percentage, nextPer));
         }
       }
     }
     currEdge = nextEdge;
   }
   output.close();
 }
  /**
   * Attempt to write the file, delete the backup and return true as atomically as possible. If any
   * exception occurs, delete the new file; next time we will restore from the backup.
   *
   * @param key table key
   * @param objectEntity table instance
   * @param originalFile file to write new data
   * @param backupFile backup file to be used if write is failed
   */
  private <E> void writeTableFile(
      String key, ObjectEntity<E> objectEntity, File originalFile, File backupFile) {
    try {
      FileOutputStream fileStream = new FileOutputStream(originalFile);

      final Output kryoOutput = new Output(fileStream);
      getKryo().writeObject(kryoOutput, objectEntity);
      kryoOutput.flush();
      fileStream.flush();
      sync(fileStream);
      kryoOutput.close(); // also close file stream

      // Writing was successful, delete the backup file if there is one.
      //noinspection ResultOfMethodCallIgnored
      backupFile.delete();
    } catch (IOException | KryoException e) {
      // Clean up an unsuccessfully written file
      if (originalFile.exists()) {
        if (!originalFile.delete()) {
          throw new PaperDbException("Couldn't clean up partially-written file " + originalFile, e);
        }
      }
      throw new PaperDbException(
          "Couldn't save table: "
              + key
              + ". "
              + "Backed up table will be used on next read attempt",
          e);
    }
  }
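The read side of this scheme (not shown here) would restore from the backup whenever one exists, since a surviving backup means the last write did not complete. A rough sketch, assuming a hypothetical readTableFile helper and that PaperDbException offers a message-only constructor:

  private <E> ObjectEntity<E> readTableFile(String key, File originalFile, File backupFile) {
    // A leftover backup means the previous write failed; restore it before reading.
    if (backupFile.exists()) {
      //noinspection ResultOfMethodCallIgnored
      originalFile.delete();
      if (!backupFile.renameTo(originalFile)) {
        throw new PaperDbException("Couldn't restore backup for table: " + key);
      }
    }
    try {
      final Input kryoInput = new Input(new FileInputStream(originalFile));
      try {
        //noinspection unchecked
        return (ObjectEntity<E>) getKryo().readObject(kryoInput, ObjectEntity.class);
      } finally {
        kryoInput.close(); // also closes the underlying file stream
      }
    } catch (FileNotFoundException | KryoException e) {
      throw new PaperDbException("Couldn't read table: " + key, e);
    }
  }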
  private void runTest(
      TestDataDescription description,
      boolean optimizedGenerics,
      String variant,
      Function1<File, Input> inputFactory,
      Function1<File, Output> outputFactory)
      throws Exception {
    File file =
        new File("test/resources/" + description.classSimpleName() + "-" + variant + ".ser");
    file.getParentFile().mkdirs();

    if (file.exists()) {
      Log.info(
          "Reading and testing "
              + description.classSimpleName()
              + " with mode '"
              + variant
              + "' from file "
              + file.getAbsolutePath());
      Input in = inputFactory.apply(file);
      readAndRunTest(description, optimizedGenerics, in);
      in.close();
    } else {
      Log.info(
          "Testing and writing "
              + description.classSimpleName()
              + " with mode '"
              + variant
              + "' to file "
              + file.getAbsolutePath());
      Output out = outputFactory.apply(file);
      try {
        runTestAndWrite(description, optimizedGenerics, out);
        out.close();
      } catch (Exception e) {
        // if anything failed (e.g. the initial test), we should delete the file as it may be empty
        // or corrupted
        out.close();
        file.delete();
        throw e;
      }
    }
  }
 private static byte[] serializeObjectToKryo(Serializable object) {
   ByteArrayOutputStream baos = new ByteArrayOutputStream();
   Output output = new Output(baos);
   Kryo kryo = borrowKryo();
   try {
     kryo.writeObject(output, object);
   } finally {
     releaseKryo(kryo);
   }
   output.close();
   return baos.toByteArray();
 }
Example #6
  public static byte[] serialize(Object object) throws SerializeException {
    if (object == null) {
      throw new SerializeException("The object to serialize is null");
    }

    Kryo kryo = kryoThreadLocal.get();
    ByteArrayOutputStream stream = new ByteArrayOutputStream(20000);
    Output output = new Output(stream);
    kryo.writeObject(output, object);
    output.close();

    return stream.toByteArray();
  }
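A matching deserialize method for the same thread-local Kryo setup could look like the following. This is a sketch, not part of the source: the method name, generic signature, and null check are assumptions.

  public static <T> T deserialize(byte[] bytes, Class<T> type) throws SerializeException {
    if (bytes == null || bytes.length == 0) {
      throw new SerializeException("The byte array to deserialize is null or empty");
    }

    // Reuse the same per-thread Kryo instance as serialize()
    Kryo kryo = kryoThreadLocal.get();
    Input input = new Input(bytes);
    T result = kryo.readObject(input, type);
    input.close();

    return result;
  }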
 @Test
 public void testSerDeserPerf2() throws Exception {
   Kryo kryo = new Kryo();
   String outputPath = FilenameUtils.concat(getTmpPath(), "file2.bin");
   Output output = new Output(new FileOutputStream(outputPath));
   for (int i = 0; i < 1000; i++) {
     kryo.writeObject(output, constructNewPE());
   }
   output.close();
   Input input = new Input(new FileInputStream(outputPath));
   NewPartitionedEvent someObject = kryo.readObject(input, NewPartitionedEvent.class);
   input.close();
   Assert.assertEquals(1, someObject.getData().length);
 }
  private DatagramPacket[] messageToPackets(
      final InetSocketAddress remoteSocketAddress, final RPCMessage rpcMessage) {

    final MultiPacketOutputStream mpos =
        new MultiPacketOutputStream(RPCMessage.MAXIMUM_MSG_SIZE + RPCMessage.METADATA_SIZE);
    final Kryo kryo = KryoUtil.getKryo();
    kryo.reset();

    final Output output = new Output(mpos);

    kryo.writeObject(output, new RPCEnvelope(rpcMessage));
    output.close();
    mpos.close();

    return mpos.createPackets(remoteSocketAddress);
  }
  /**
   * @param sopremoRecord the record to serialize and immediately deserialize
   * @return the deserialized copy of the given record
   */
  private SopremoRecord serializeAndDeserialize(SopremoRecord sopremoRecord) throws IOException {
    Kryo kryo = new Kryo();
    kryo.setReferences(false);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Output output = new Output(baos);
    kryo.writeObject(output, sopremoRecord);
    output.close();
    baos.close();

    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    Input input = new Input(bais);
    SopremoRecord deserialized = kryo.readObject(input, SopremoRecord.class);
    deserialized.setLayout(sopremoRecord.getLayout());
    input.close();

    return deserialized;
  }
 @Override
 public byte[] toBinary(Object object) {
   ByteArrayOutputStream os = new ByteArrayOutputStream();
   Output output = new Output(os);
   try {
     this.kryo.writeObject(output, object);
   } finally {
     output.close();
     try {
       os.close();
     } catch (IOException e) {
       // ignore
     }
   }
   System.out.println("aaaaa");
   return os.toByteArray();
 }
  private void execute(Map<Object, Object> map, int inserts) {
    Random random = new Random();
    for (int i = 0; i < inserts; i++) map.put(random.nextLong(), random.nextBoolean());

    Kryo kryo = new Kryo();
    kryo.register(HashMap.class, new MapSerializer());
    kryo.register(ConcurrentHashMap.class, new MapSerializer());

    Output output = new Output(2048, -1);
    kryo.writeClassAndObject(output, map);
    output.close();

    Input input = new Input(output.toBytes());
    Object deserialized = kryo.readClassAndObject(input);
    input.close();

    Assert.assertEquals(map, deserialized);
  }
Example #12
  private static void preparePackageData(PackageVersion version, List<Instance> members) {
    LOG.info("Preparing Package Data file for package '" + version + "'");
    String jarName = JarUtils.getPackagePath(version, false);
    // Prepare jar for this package version
    DataStore dataStore = InvocationContext.get().getDataStore();
    try (GZIPOutputStream out = new GZIPOutputStream(new FileOutputStream(jarName))) {
      Kryo kryo = new Kryo();
      Output output = new Output(out);

      // Write Package Members
      List<Instance> collect = members.stream().collect(Collectors.toList());
      long id = version.getId();
      SerializationUtil.writeInstances(kryo, output, collect);

      // Write QueryPlans
      List<ReplicationSQLQueryPlan> qPlans = dataStore.getAllQueryPlansByPackageVersion(id);
      output.writeInt(qPlans.size());
      qPlans.forEach(m -> m.write(kryo, output));

      // Write ModelHierarchyInfos
      List<ModelHierarchyInfo> mHierarchies = dataStore.getAllModelHierachyInfoByPackageVersion(id);
      output.writeInt(mHierarchies.size());
      mHierarchies.forEach(
          m -> {
            output.writeInt(m.getParent());
            output.writeInt(m.getSubModel());
          });

      // Write ClassHierarchy infos
      List<ClassHierarchyInfo> clsHierarchies =
          dataStore.getAllClassHierachyInfoByPackageVersion(id);
      output.writeInt(clsHierarchies.size());
      clsHierarchies.forEach(
          m -> {
            output.writeString(m.getCls());
            output.writeString(m.getSubClass());
          });
      output.close();

    } catch (Exception e) {
      LOG.info("Package Data not prepared for " + jarName);
      throw new RuntimeException(e);
    }
  }
  @Override
  public Collection<Partition<AbstractFileInputOperator<T>>> definePartitions(
      Collection<Partition<AbstractFileInputOperator<T>>> partitions, PartitioningContext context) {
    lastRepartition = System.currentTimeMillis();

    int totalCount = getNewPartitionCount(partitions, context);

    LOG.debug("Computed new partitions: {}", totalCount);

    if (totalCount == partitions.size()) {
      return partitions;
    }

    AbstractFileInputOperator<T> tempOperator =
        partitions.iterator().next().getPartitionedInstance();

    MutableLong tempGlobalNumberOfRetries = tempOperator.globalNumberOfRetries;
    MutableLong tempGlobalNumberOfFailures = tempOperator.globalNumberOfFailures;

    /*
     * Build collective state from all instances of the operator.
     */
    Set<String> totalProcessedFiles = Sets.newHashSet();
    Set<FailedFile> currentFiles = Sets.newHashSet();
    List<DirectoryScanner> oldscanners = Lists.newLinkedList();
    List<FailedFile> totalFailedFiles = Lists.newLinkedList();
    List<String> totalPendingFiles = Lists.newLinkedList();
    Set<Integer> deletedOperators = Sets.newHashSet();

    for (Partition<AbstractFileInputOperator<T>> partition : partitions) {
      AbstractFileInputOperator<T> oper = partition.getPartitionedInstance();
      totalProcessedFiles.addAll(oper.processedFiles);
      totalFailedFiles.addAll(oper.failedFiles);
      totalPendingFiles.addAll(oper.pendingFiles);
      currentFiles.addAll(oper.unfinishedFiles);
      tempGlobalNumberOfRetries.add(oper.localNumberOfRetries);
      tempGlobalNumberOfFailures.add(oper.localNumberOfFailures);
      if (oper.currentFile != null) {
        currentFiles.add(new FailedFile(oper.currentFile, oper.offset));
      }
      oldscanners.add(oper.getScanner());
      deletedOperators.add(oper.operatorId);
    }

    /*
     * Create partitions of scanners, scanner's partition method will do state
     * transfer for DirectoryScanner objects.
     */
    List<DirectoryScanner> scanners = scanner.partition(totalCount, oldscanners);

    Kryo kryo = new Kryo();
    Collection<Partition<AbstractFileInputOperator<T>>> newPartitions =
        Lists.newArrayListWithExpectedSize(totalCount);
    Collection<IdempotentStorageManager> newManagers =
        Lists.newArrayListWithExpectedSize(totalCount);

    for (int i = 0; i < scanners.size(); i++) {

      // Kryo.copy fails as it attempts to clone transient fields
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      Output loutput = new Output(bos);
      kryo.writeObject(loutput, this);
      loutput.close();
      Input lInput = new Input(bos.toByteArray());
      @SuppressWarnings("unchecked")
      AbstractFileInputOperator<T> oper = kryo.readObject(lInput, this.getClass());
      lInput.close();

      DirectoryScanner scn = scanners.get(i);
      oper.setScanner(scn);

      // Do state transfer for processed files.
      oper.processedFiles.addAll(totalProcessedFiles);
      oper.globalNumberOfFailures = tempGlobalNumberOfFailures;
      oper.localNumberOfFailures.setValue(0);
      oper.globalNumberOfRetries = tempGlobalNumberOfRetries;
      oper.localNumberOfRetries.setValue(0);

      /* redistribute unfinished files properly */
      oper.unfinishedFiles.clear();
      oper.currentFile = null;
      oper.offset = 0;
      Iterator<FailedFile> unfinishedIter = currentFiles.iterator();
      while (unfinishedIter.hasNext()) {
        FailedFile unfinishedFile = unfinishedIter.next();
        if (scn.acceptFile(unfinishedFile.path)) {
          oper.unfinishedFiles.add(unfinishedFile);
          unfinishedIter.remove();
        }
      }

      /* transfer failed files */
      oper.failedFiles.clear();
      Iterator<FailedFile> iter = totalFailedFiles.iterator();
      while (iter.hasNext()) {
        FailedFile ff = iter.next();
        if (scn.acceptFile(ff.path)) {
          oper.failedFiles.add(ff);
          iter.remove();
        }
      }

      /* redistribute pending files properly */
      oper.pendingFiles.clear();
      Iterator<String> pendingFilesIterator = totalPendingFiles.iterator();
      while (pendingFilesIterator.hasNext()) {
        String pathString = pendingFilesIterator.next();
        if (scn.acceptFile(pathString)) {
          oper.pendingFiles.add(pathString);
          pendingFilesIterator.remove();
        }
      }
      newPartitions.add(new DefaultPartition<AbstractFileInputOperator<T>>(oper));
      newManagers.add(oper.idempotentStorageManager);
    }

    idempotentStorageManager.partitioned(newManagers, deletedOperators);
    LOG.info("definePartitions called returning {} partitions", newPartitions.size());
    return newPartitions;
  }
 public void close() {
   output.close();
   writeIndex();
 }
 /**
  * @param plan Usually of type MapredWork, MapredLocalWork etc.
  * @param out stream in which serialized plan is written into
  */
 private static void serializeObjectByKryo(Kryo kryo, Object plan, OutputStream out) {
   Output output = new Output(out);
   kryo.setClassLoader(Utilities.getSessionSpecifiedClassLoader());
   kryo.writeObject(output, plan);
   output.close();
 }
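The inverse direction typically mirrors this with an Input over the plan stream. A minimal sketch, assuming the same Utilities class; the method name and generic signature are illustrative, not taken from the source:

 private static <T> T deserializeObjectByKryo(Kryo kryo, InputStream in, Class<T> clazz) {
   Input input = new Input(in);
   // Use the session classloader, matching the serialization path above
   kryo.setClassLoader(Utilities.getSessionSpecifiedClassLoader());
   T plan = kryo.readObject(input, clazz);
   input.close();
   return plan;
 }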