@Override
public <E> Dataset<E> load(String name) {
  Preconditions.checkArgument(name != null, "Name cannot be null");

  logger.debug("Loading dataset:{}", name);

  DatasetDescriptor descriptor = metadataProvider.load(name);
  descriptor = addRepositoryUri(descriptor);

  FileSystemDataset<E> ds = new FileSystemDataset.Builder()
      .name(name)
      .configuration(conf)
      .descriptor(descriptor)
      .partitionKey(descriptor.isPartitioned() ?
          org.kitesdk.data.impl.Accessor.getDefault().newPartitionKey() :
          null)
      .partitionListener(getPartitionListener())
      .build();

  logger.debug("Loaded dataset:{}", ds);

  return ds;
}
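// Usage sketch for load() (illustrative only): "repo" and the dataset name
// "users" are hypothetical, and the entity type is assumed to be Avro's
// GenericRecord.
//
//   DatasetRepository repo = ...; // e.g. a FileSystemDatasetRepository
//   Dataset<GenericRecord> users = repo.load("users");
//   DatasetDescriptor stored = users.getDescriptor();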
@Override
public <E> Dataset<E> create(String name, DatasetDescriptor descriptor) {
  Preconditions.checkArgument(name != null, "Name cannot be null");
  Preconditions.checkArgument(descriptor != null, "Descriptor cannot be null");
  Preconditions.checkArgument(descriptor.getLocation() == null,
      "Descriptor location cannot be set; it is assigned by the MetadataProvider");

  DatasetDescriptor newDescriptor = metadataProvider.create(name, descriptor);
  newDescriptor = addRepositoryUri(newDescriptor);

  final URI location = newDescriptor.getLocation();
  if (location == null) {
    throw new DatasetRepositoryException(
        "[BUG] MetadataProvider did not assign a location to dataset:" + name);
  }

  ensureExists(newDescriptor, conf);

  logger.debug("Created dataset:{} schema:{} datasetPath:{}", new Object[] {
      name, newDescriptor.getSchema(), location.toString() });

  return new FileSystemDataset.Builder()
      .name(name)
      .configuration(conf)
      .descriptor(newDescriptor)
      .partitionKey(newDescriptor.isPartitioned() ?
          org.kitesdk.data.impl.Accessor.getDefault().newPartitionKey() :
          null)
      .partitionListener(getPartitionListener())
      .build();
}
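// Usage sketch for create() (illustrative only): the dataset name and schema
// resource are hypothetical, and the descriptor builder calls are assumptions
// about the DatasetDescriptor API. Note the descriptor carries no location;
// the MetadataProvider assigns one, as enforced above.
//
//   DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
//       .schemaUri("resource:user.avsc") // hypothetical schema resource
//       .build();
//   Dataset<GenericRecord> users = repo.create("users", descriptor);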
@Override
public boolean delete(String name) {
  Preconditions.checkArgument(name != null, "Name cannot be null");

  logger.debug("Deleting dataset:{}", name);

  DatasetDescriptor descriptor;
  try {
    descriptor = metadataProvider.load(name);
    descriptor = addRepositoryUri(descriptor);
  } catch (DatasetNotFoundException ex) {
    return false;
  }

  boolean changed;
  try {
    // don't care about the return value here -- if it already doesn't exist
    // we still need to delete the data directory
    changed = metadataProvider.delete(name);
  } catch (MetadataProviderException ex) {
    throw new DatasetRepositoryException(
        "Failed to delete descriptor for name:" + name, ex);
  }

  final Path dataLocation = new Path(descriptor.getLocation());
  final FileSystem fs = fsForPath(dataLocation, conf);

  try {
    if (fs.exists(dataLocation)) {
      if (fs.delete(dataLocation, true)) {
        changed = true;
      } else {
        throw new DatasetRepositoryException(
            "Failed to delete dataset name:" + name +
            " location:" + dataLocation);
      }
    }
  } catch (IOException e) {
    // propagate the IOException as the cause so callers can see why the
    // filesystem delete failed
    throw new DatasetRepositoryException(
        "Internal failure when removing location:" + dataLocation, e);
  }

  return changed;
}
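// Usage sketch for delete() (illustrative only): returns false when the
// dataset's metadata did not exist, true when metadata or data was removed.
//
//   boolean removed = repo.delete("users"); // "users" is hypothetical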
@Override
public Collection<String> list() {
  return metadataProvider.list();
}
@Override
public boolean exists(String name) {
  Preconditions.checkArgument(name != null, "Name cannot be null");
  return metadataProvider.exists(name);
}
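// Usage sketch for list()/exists() (illustrative only); both delegate
// directly to the MetadataProvider:
//
//   if (repo.exists("users")) {
//     logger.info("users is present");
//   }
//   for (String name : repo.list()) {
//     logger.info("Found dataset: {}", name);
//   }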
@Override
public <E> Dataset<E> update(String name, DatasetDescriptor descriptor) {
  Preconditions.checkArgument(name != null, "Dataset name cannot be null");
  Preconditions.checkArgument(descriptor != null, "Descriptor cannot be null");

  DatasetDescriptor oldDescriptor = metadataProvider.load(name);
  // oldDescriptor is valid if load didn't throw DatasetNotFoundException

  if (!oldDescriptor.getFormat().equals(descriptor.getFormat())) {
    throw new DatasetRepositoryException("Cannot change dataset format from " +
        oldDescriptor.getFormat() + " to " + descriptor.getFormat());
  }

  final URI oldLocation = oldDescriptor.getLocation();
  if ((oldLocation != null) && !(oldLocation.equals(descriptor.getLocation()))) {
    throw new DatasetRepositoryException("Cannot change the dataset's location");
  }

  if (oldDescriptor.isPartitioned() != descriptor.isPartitioned()) {
    throw new DatasetRepositoryException(
        "Cannot change an unpartitioned dataset to partitioned or vice versa.");
  } else if (oldDescriptor.isPartitioned() && descriptor.isPartitioned() &&
      !oldDescriptor.getPartitionStrategy().equals(descriptor.getPartitionStrategy())) {
    throw new DatasetRepositoryException("Cannot change partition strategy from " +
        oldDescriptor.getPartitionStrategy() + " to " +
        descriptor.getPartitionStrategy());
  }

  // check that records written with the old schema can be read using the new schema
  final Schema oldSchema = oldDescriptor.getSchema();
  final Schema newSchema = descriptor.getSchema();
  if (!SchemaValidationUtil.canRead(oldSchema, newSchema)) {
    throw new IncompatibleSchemaException("New schema cannot read data " +
        "written using old schema. New schema: " + newSchema.toString(true) +
        "\nOld schema: " + oldSchema.toString(true));
  }

  DatasetDescriptor updatedDescriptor = metadataProvider.update(name, descriptor);
  updatedDescriptor = addRepositoryUri(updatedDescriptor);

  logger.debug("Updated dataset:{} schema:{} datasetPath:{}", new Object[] {
      name, updatedDescriptor.getSchema(),
      updatedDescriptor.getLocation().toString() });

  return new FileSystemDataset.Builder()
      .name(name)
      .configuration(conf)
      .descriptor(updatedDescriptor)
      .partitionKey(updatedDescriptor.isPartitioned() ?
          org.kitesdk.data.impl.Accessor.getDefault().newPartitionKey() :
          null)
      .partitionListener(getPartitionListener())
      .build();
}
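// Usage sketch for update() (illustrative only): evolves the schema while
// keeping format, location, and partition strategy unchanged; the new schema
// must be able to read data written with the old one, or
// IncompatibleSchemaException is thrown. The copy-builder call and schema
// resource are assumptions about the DatasetDescriptor API; copying the
// stored descriptor preserves the location, which update() requires to match.
//
//   DatasetDescriptor evolved = new DatasetDescriptor.Builder(
//           repo.load("users").getDescriptor())
//       .schemaUri("resource:user_v2.avsc") // hypothetical evolved schema
//       .build();
//   Dataset<GenericRecord> users = repo.update("users", evolved);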