Example #1
 @VisibleForTesting
 @SuppressWarnings("unchecked")
 <E> FileAppender<E> newAppender(Path temp) {
   Format format = descriptor.getFormat();
   if (Formats.PARQUET.equals(format)) {
     // by default, Parquet is not durable
     if (DescriptorUtil.isDisabled(FileSystemProperties.NON_DURABLE_PARQUET_PROP, descriptor)) {
       return (FileAppender<E>)
           new DurableParquetAppender(
               fs, temp, descriptor.getSchema(), conf, descriptor.getCompressionType());
     } else {
       return (FileAppender<E>)
           new ParquetAppender(
               fs, temp, descriptor.getSchema(), conf, descriptor.getCompressionType());
     }
   } else if (Formats.AVRO.equals(format)) {
     return new AvroAppender<E>(fs, temp, descriptor.getSchema(), descriptor.getCompressionType());
   } else if (Formats.CSV.equals(format)
       && DescriptorUtil.isEnabled(FileSystemProperties.ALLOW_CSV_PROP, descriptor)) {
     return new CSVAppender<E>(fs, temp, descriptor);
   } else {
     this.state = ReaderWriterState.ERROR;
     throw new UnknownFormatException("Unknown format " + descriptor.getFormat());
   }
 }
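
The factory dispatches on the dataset's declared Format, with Parquet durability controlled by a descriptor property: only an explicit opt-out of the non-durable default selects the costlier DurableParquetAppender. A minimal sketch of the two DescriptorUtil checks used here, assuming they match an explicit "true"/"false" property value (the getProperty accessor is an assumption):

import org.kitesdk.data.DatasetDescriptor;

// Sketch only: an unset property matches neither check, so it falls through
// to the else branch above (the plain, non-durable ParquetAppender).
final class DescriptorUtilSketch {
  static boolean isEnabled(String prop, DatasetDescriptor descriptor) {
    return "true".equals(descriptor.getProperty(prop));
  }

  static boolean isDisabled(String prop, DatasetDescriptor descriptor) {
    return "false".equals(descriptor.getProperty(prop));
  }
}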
Example #2
  @Override
  public final void initialize() {
    Preconditions.checkState(
        state.equals(ReaderWriterState.NEW), "Unable to open a writer from state: %s", state);

    // ensure the directory exists
    try {
      fs.mkdirs(directory);
    } catch (IOException ex) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetIOException("Failed to create path " + directory, ex);
    }

    // initialize paths
    this.finalPath = new Path(directory, uniqueFilename(descriptor.getFormat()));
    this.tempPath = tempFilename(finalPath);
    this.appender = newAppender(tempPath);

    try {
      appender.open();
    } catch (IOException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetIOException("Failed to open appender " + appender, e);
    }

    this.count = 0;

    LOG.debug("Opened appender {} for {}", appender, finalPath);

    this.state = ReaderWriterState.OPEN;
  }
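
initialize() drives a small lifecycle: it is only legal from NEW, any failure flips the writer to ERROR, and success lands in OPEN. The states referenced across these examples imply an enum along these lines (a sketch; the real enum lives in Kite's SPI, and CLOSED is an assumption here):

// Writer lifecycle implied by the checkState/transition code above.
enum ReaderWriterState {
  NEW,    // constructed, initialize() not yet called
  OPEN,   // appender opened and accepting writes
  ERROR,  // a failure occurred; the writer must not be reused
  CLOSED  // assumed terminal state after a successful close
}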
Example #3
  @Override
  public final void initialize() {
    Preconditions.checkState(
        state.equals(ReaderWriterState.NEW), "Unable to open a writer from state: %s", state);

    ValidationException.check(
        isSupportedFormat(descriptor), "Not a supported format: %s", descriptor.getFormat());

    // ensure the directory exists
    try {
      fs.mkdirs(directory);
    } catch (RuntimeException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetOperationException(e, "Failed to create path %s", directory);
    } catch (IOException ex) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetIOException("Failed to create path " + directory, ex);
    }

    // initialize paths
    try {
      this.finalPath = new Path(directory, uniqueFilename(descriptor.getFormat()));
      this.tempPath = tempFilename(finalPath);
    } catch (RuntimeException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetOperationException(e, "Failed to initialize file paths under %s", directory);
    }

    try {
      this.appender = newAppender(tempPath);
      appender.open();
    } catch (RuntimeException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetOperationException(e, "Failed to open appender %s", appender);
    } catch (IOException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetIOException("Failed to open appender " + appender, e);
    }

    this.count = 0;

    LOG.info("Opened output appender {} for {}", appender, finalPath);

    this.state = ReaderWriterState.OPEN;
  }
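
This variant hardens Example #2: the format is validated up front, and every step also traps RuntimeException, recording the ERROR state and rethrowing as a DatasetOperationException, so even unexpected failures leave the writer's state machine consistent.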
Example #4
  @Override
  public void merge(FileSystemDataset<E> update) {
    DatasetDescriptor updateDescriptor = update.getDescriptor();

    if (!updateDescriptor.getFormat().equals(descriptor.getFormat())) {
      throw new DatasetRepositoryException(
          "Cannot merge dataset format "
              + updateDescriptor.getFormat()
              + " with format "
              + descriptor.getFormat());
    }

    if (updateDescriptor.isPartitioned() != descriptor.isPartitioned()) {
      throw new DatasetRepositoryException(
          "Cannot merge an unpartitioned dataset with a " + " partitioned one or vice versa.");
    } else if (updateDescriptor.isPartitioned()
        && descriptor.isPartitioned()
        && !updateDescriptor.getPartitionStrategy().equals(descriptor.getPartitionStrategy())) {
      throw new DatasetRepositoryException(
          "Cannot merge dataset partition strategy "
              + updateDescriptor.getPartitionStrategy()
              + " with "
              + descriptor.getPartitionStrategy());
    }

    if (!updateDescriptor.getSchema().equals(descriptor.getSchema())) {
      throw new DatasetRepositoryException(
          "Cannot merge dataset schema "
              + updateDescriptor.getSchema()
              + " with schema "
              + descriptor.getSchema());
    }

    Set<String> addedPartitions = Sets.newHashSet();
    for (Path path : update.pathIterator()) {
      URI relativePath = update.getDirectory().toUri().relativize(path.toUri());
      Path newPath = new Path(directory, new Path(relativePath));
      Path newPartitionDirectory = newPath.getParent();
      try {
        if (!fileSystem.exists(newPartitionDirectory)) {
          fileSystem.mkdirs(newPartitionDirectory);
        }
        logger.debug("Renaming {} to {}", path, newPath);
        boolean renameOk = fileSystem.rename(path, newPath);
        if (!renameOk) {
          throw new DatasetException(
              "Dataset merge failed during rename of " + path + " to " + newPath);
        }
      } catch (IOException e) {
        throw new DatasetIOException("Dataset merge failed", e);
      }
      if (descriptor.isPartitioned() && partitionListener != null) {
        String partition = newPartitionDirectory.toString();
        if (!addedPartitions.contains(partition)) {
          partitionListener.partitionAdded(name, partition);
          addedPartitions.add(partition);
        }
      }
    }
  }
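
The merge moves each data file with FileSystem.rename, preserving its path relative to the source dataset's directory so the partition layout carries over, and the addedPartitions set ensures each new partition directory is announced to the PartitionListener only once.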
Example #5
  /**
   * Checks that the {@code existing} {@link DatasetDescriptor} is compatible with {@code test}.
   *
   * @param existing the current {@code DatasetDescriptor} for a dataset
   * @param test a new {@code DatasetDescriptor} for the same dataset
   */
  public static void checkCompatible(DatasetDescriptor existing, DatasetDescriptor test) {
    checkNotChanged("format", existing.getFormat(), test.getFormat());

    checkNotChanged("partitioning", existing.isPartitioned(), test.isPartitioned());

    if (existing.isPartitioned()) {
      checkNotChanged(
          "partition strategy", existing.getPartitionStrategy(), test.getPartitionStrategy());
    }

    // check can read records written with old schema using new schema
    Schema oldSchema = existing.getSchema();
    Schema testSchema = test.getSchema();
    if (!SchemaValidationUtil.canRead(oldSchema, testSchema)) {
      throw new IncompatibleSchemaException(
          "Schema cannot read data "
              + "written using existing schema. Schema: "
              + testSchema.toString(true)
              + "\nExisting schema: "
              + oldSchema.toString(true));
    }
  }
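
checkNotChanged is not shown in this example; based on how it is called, a minimal sketch might be the following (message text and null handling are assumptions, reusing the ValidationException.check style seen in Example #3):

// Sketch: reject any change to an immutable descriptor property.
private static void checkNotChanged(String what, Object existing, Object test) {
  ValidationException.check(
      (existing == test) || (existing != null && existing.equals(test)),
      "Dataset %s is not compatible with existing: %s != %s",
      what, existing, test);
}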
Example #6
 static <E> FileSystemWriter<E> newWriter(FileSystem fs, Path path, DatasetDescriptor descriptor) {
   Format format = descriptor.getFormat();
   if (Formats.PARQUET.equals(format)) {
     // by default, Parquet is not durable
     if (DescriptorUtil.isDisabled(FileSystemProperties.NON_DURABLE_PARQUET_PROP, descriptor)) {
       return new IncrementalWriter<E>(fs, path, descriptor);
     } else {
       return new FileSystemWriter<E>(fs, path, descriptor);
     }
   } else if (Formats.AVRO.equals(format) || Formats.CSV.equals(format)) {
     return new IncrementalWriter<E>(fs, path, descriptor);
   } else {
     return new FileSystemWriter<E>(fs, path, descriptor);
   }
 }

 static <E> PartitionedDatasetWriter<E, ?> newWriter(FileSystemView<E> view) {
   DatasetDescriptor descriptor = view.getDataset().getDescriptor();
   Format format = descriptor.getFormat();
   if (Formats.PARQUET.equals(format)) {
     // by default, Parquet is not durable
     if (DescriptorUtil.isDisabled(FileSystemProperties.NON_DURABLE_PARQUET_PROP, descriptor)) {
       return new IncrementalPartitionedDatasetWriter<E>(view);
     } else {
       return new NonDurablePartitionedDatasetWriter<E>(view);
     }
   } else if (Formats.AVRO.equals(format) || Formats.CSV.equals(format)) {
     return new IncrementalPartitionedDatasetWriter<E>(view);
   } else {
     return new NonDurablePartitionedDatasetWriter<E>(view);
   }
 }

  @Override
  public void initialize() {
    Preconditions.checkState(
        state.equals(ReaderWriterState.NEW), "Unable to open a writer from state: %s", state);

    DatasetDescriptor descriptor = view.getDataset().getDescriptor();
    ValidationException.check(
        FileSystemWriter.isSupportedFormat(descriptor),
        "Not a supported format: %s",
        descriptor.getFormat());

    LOG.debug("Opening partitioned dataset writer w/strategy:{}", partitionStrategy);

    cachedWriters =
        CacheBuilder.newBuilder()
            .maximumSize(maxWriters)
            .removalListener(new DatasetWriterCloser<E>())
            .build(createCacheLoader());

    state = ReaderWriterState.OPEN;
  }
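
Both newWriter overloads apply the same Parquet durability property check as Example #1, once for flat writers and once for partitioned ones. For the partitioned case, initialize() builds a bounded Guava cache of per-partition writers; the removal listener closes writers as they are evicted, keeping at most maxWriters files open. A sketch of what DatasetWriterCloser plausibly does (the cache key type is an assumption):

import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import org.kitesdk.data.DatasetWriter;

// Sketch: close the evicted partition writer so its file is flushed and
// the number of open files stays bounded by maximumSize(maxWriters).
class DatasetWriterCloserSketch<E> implements RemovalListener<Object, DatasetWriter<E>> {
  @Override
  public void onRemoval(RemovalNotification<Object, DatasetWriter<E>> notification) {
    DatasetWriter<E> writer = notification.getValue();
    if (writer != null && writer.isOpen()) {
      writer.close();
    }
  }
}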
Example #9
 @SuppressWarnings("unchecked")
 private <E> FileAppender<E> newAppender(Path temp) {
   Format format = descriptor.getFormat();
   if (Formats.PARQUET.equals(format)) {
     // by default, guarantee durability with the more costly writer
     if (DescriptorUtil.isEnabled(FileSystemProperties.NON_DURABLE_PARQUET_PROP, descriptor)) {
       return (FileAppender<E>) new ParquetAppender(fs, temp, descriptor.getSchema(), conf, true);
     } else {
       return (FileAppender<E>)
           new DurableParquetAppender(fs, temp, descriptor.getSchema(), conf, true);
     }
   } else if (Formats.AVRO.equals(format)) {
     return new AvroAppender<E>(fs, temp, descriptor.getSchema(), true);
   } else if (Formats.CSV.equals(format)
       && DescriptorUtil.isEnabled(FileSystemProperties.ALLOW_CSV_PROP, descriptor)) {
     return new CSVAppender<E>(fs, temp, descriptor);
   } else {
     this.state = ReaderWriterState.ERROR;
     throw new DatasetWriterException("Unknown format " + descriptor.getFormat());
   }
 }

  @Override
  public <E> Dataset<E> update(String name, DatasetDescriptor descriptor) {
    Preconditions.checkArgument(name != null, "Dataset name cannot be null");
    Preconditions.checkArgument(descriptor != null, "DatasetDescriptor cannot be null");

    DatasetDescriptor oldDescriptor = metadataProvider.load(name);

    // oldDescriptor is valid if load didn't throw NoSuchDatasetException

    if (!oldDescriptor.getFormat().equals(descriptor.getFormat())) {
      throw new DatasetRepositoryException(
          "Cannot change dataset format from "
              + oldDescriptor.getFormat()
              + " to "
              + descriptor.getFormat());
    }

    final URI oldLocation = oldDescriptor.getLocation();
    if ((oldLocation != null) && !(oldLocation.equals(descriptor.getLocation()))) {
      throw new DatasetRepositoryException("Cannot change the dataset's location");
    }

    if (oldDescriptor.isPartitioned() != descriptor.isPartitioned()) {
      throw new DatasetRepositoryException(
          "Cannot change an unpartitioned dataset to " + " partitioned or vice versa.");
    } else if (oldDescriptor.isPartitioned()
        && descriptor.isPartitioned()
        && !oldDescriptor.getPartitionStrategy().equals(descriptor.getPartitionStrategy())) {
      throw new DatasetRepositoryException(
          "Cannot change partition strategy from "
              + oldDescriptor.getPartitionStrategy()
              + " to "
              + descriptor.getPartitionStrategy());
    }

    // check can read records written with old schema using new schema
    final Schema oldSchema = oldDescriptor.getSchema();
    final Schema newSchema = descriptor.getSchema();
    if (!SchemaValidationUtil.canRead(oldSchema, newSchema)) {
      throw new IncompatibleSchemaException(
          "New schema cannot read data "
              + "written using "
              + "old schema. New schema: "
              + newSchema.toString(true)
              + "\nOld schema: "
              + oldSchema.toString(true));
    }

    DatasetDescriptor updatedDescriptor = metadataProvider.update(name, descriptor);
    updatedDescriptor = addRepositoryUri(updatedDescriptor);

    logger.debug(
        "Updated dataset:{} schema:{} datasetPath:{}",
        new Object[] {
          name, updatedDescriptor.getSchema(), updatedDescriptor.getLocation().toString()
        });

    return new FileSystemDataset.Builder()
        .name(name)
        .configuration(conf)
        .descriptor(updatedDescriptor)
        .partitionKey(
            updatedDescriptor.isPartitioned()
                ? org.kitesdk.data.impl.Accessor.getDefault().newPartitionKey()
                : null)
        .partitionListener(getPartitionListener())
        .build();
  }
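
A hedged usage sketch of update() for schema evolution; the helper name is hypothetical and the DatasetDescriptor.Builder copy constructor is an assumption. update() rejects the change unless the new schema can still read data written with the old one:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.kitesdk.data.Dataset;
import org.kitesdk.data.DatasetDescriptor;
import org.kitesdk.data.DatasetRepository;

// Hypothetical helper: evolve only the schema, leaving format, location,
// and partition strategy untouched so the compatibility checks pass.
static Dataset<GenericRecord> evolveSchema(
    DatasetRepository repo, String name, Schema newSchema) {
  DatasetDescriptor current = repo.load(name).getDescriptor();
  DatasetDescriptor evolved =
      new DatasetDescriptor.Builder(current).schema(newSchema).build();
  return repo.update(name, evolved);
}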
Example #11
 static boolean isSupportedFormat(DatasetDescriptor descriptor) {
   Format format = descriptor.getFormat();
   return (SUPPORTED_FORMATS.contains(format)
       || (Formats.CSV.equals(format)
           && DescriptorUtil.isEnabled(FileSystemProperties.ALLOW_CSV_PROP, descriptor)));
 }
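
The SUPPORTED_FORMATS constant is not shown; read together with the appender factories above, it plausibly holds the always-writable formats, with CSV allowed only via the explicit opt-in property (the exact contents are an assumption):

// Assumed constant: Avro and Parquet always have writers; CSV requires
// the ALLOW_CSV_PROP opt-in handled in isSupportedFormat above.
private static final Set<Format> SUPPORTED_FORMATS =
    ImmutableSet.of(Formats.AVRO, Formats.PARQUET);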