@SuppressWarnings("all")
public class GetMarketplaceCategoriesMessage extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"GetMarketplaceCategoriesMessage\",\"namespace\":\"com.x.service.marketplace.message\",\"fields\":[{\"name\":\"siteCode\",\"type\":\"string\"}],\"topic\":\"/marketplace/category/find\"}");
  public java.lang.CharSequence siteCode;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }
  // Used by DatumWriter.  Applications should not call.
  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return siteCode;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  // Used by DatumReader.  Applications should not call.
  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        siteCode = (java.lang.CharSequence) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
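For reference, a generated record like this is typically serialized with Avro's specific datum classes. A minimal sketch, not part of the generated class (the site code value is hypothetical):

// A minimal sketch: binary-encode one record using the generated SCHEMA$.
static byte[] encodeExample() throws java.io.IOException {
  GetMarketplaceCategoriesMessage msg = new GetMarketplaceCategoriesMessage();
  msg.siteCode = "US"; // hypothetical value
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  org.apache.avro.specific.SpecificDatumWriter<GetMarketplaceCategoriesMessage> writer =
      new org.apache.avro.specific.SpecificDatumWriter<GetMarketplaceCategoriesMessage>(
          GetMarketplaceCategoriesMessage.SCHEMA$);
  org.apache.avro.io.BinaryEncoder encoder =
      org.apache.avro.io.EncoderFactory.get().binaryEncoder(out, null);
  writer.write(msg, encoder);
  encoder.flush();
  return out.toByteArray();
}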
Example #2
 @Override
 public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args)
     throws Exception {
   if (args.size() != 2) {
     err.println("Expected 2 arguments: schema binary_data_file");
     err.println("Use '-' as binary_data_file for stdin.");
     return 1;
   }
   Schema schema = Schema.parse(args.get(0));
   InputStream input;
   boolean needsClosing;
   if (args.get(1).equals("-")) {
     input = stdin;
     needsClosing = false;
   } else {
     input = new FileInputStream(args.get(1));
     needsClosing = true;
   }
   try {
     DatumReader<Object> reader = new GenericDatumReader<Object>(schema);
     Object datum = reader.read(null, DecoderFactory.get().binaryDecoder(input, null));
     DatumWriter<Object> writer = new GenericDatumWriter<Object>(schema);
     JsonGenerator g = new JsonFactory().createJsonGenerator(out, JsonEncoding.UTF8);
     g.useDefaultPrettyPrinter();
     writer.write(datum, EncoderFactory.get().jsonEncoder(schema, g));
     g.flush();
     out.println();
     out.flush();
   } finally {
     if (needsClosing) {
       input.close();
     }
   }
   return 0;
 }
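The tool above decodes Avro binary data and pretty-prints it as JSON. The inverse direction swaps the decoder and encoder roles; a sketch, assuming the same Avro 1.5+ classes plus java.io.OutputStream:

 static void jsonToBinary(Schema schema, InputStream jsonIn, OutputStream binaryOut)
     throws IOException {
   // Decode one datum from JSON text, then re-encode it in compact binary form.
   DatumReader<Object> reader = new GenericDatumReader<Object>(schema);
   Object datum = reader.read(null, DecoderFactory.get().jsonDecoder(schema, jsonIn));
   DatumWriter<Object> writer = new GenericDatumWriter<Object>(schema);
   BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(binaryOut, null);
   writer.write(datum, encoder);
   encoder.flush();
 }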
  @Override
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), getClass());
    conf.setJobName("UFO count");

    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: avro UFO counter <in> <out>");
      System.exit(2);
    }

    FileInputFormat.addInputPath(conf, new Path(otherArgs[0]));
    Path outputPath = new Path(otherArgs[1]);
    FileOutputFormat.setOutputPath(conf, outputPath);
    outputPath.getFileSystem(conf).delete(outputPath, true); // recursively remove any previous output
    Schema inputSchema = Schema.parse(getClass().getResourceAsStream("ufo.avsc"));
    AvroJob.setInputSchema(conf, inputSchema);
    AvroJob.setMapOutputSchema(
        conf,
        Pair.getPairSchema(Schema.create(Schema.Type.STRING), Schema.create(Schema.Type.LONG)));

    AvroJob.setOutputSchema(conf, OUTPUT_SCHEMA);
    AvroJob.setMapperClass(conf, AvroRecordMapper.class);
    AvroJob.setReducerClass(conf, AvroRecordReducer.class);
    conf.setInputFormat(AvroInputFormat.class);
    JobClient.runJob(conf);

    return 0;
  }
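AvroRecordMapper, AvroRecordReducer, and OUTPUT_SCHEMA are defined elsewhere in that project. As a purely hypothetical sketch, a mapper emitting (string, long) pairs against the avro-mapred API could look like this; the "shape" field name is an assumption, not taken from ufo.avsc:

  // Hypothetical sketch only; the real AvroRecordMapper is not shown on this page.
  public static class AvroRecordMapper extends AvroMapper<GenericRecord, Pair<Utf8, Long>> {
    @Override
    public void map(GenericRecord record, AvroCollector<Pair<Utf8, Long>> collector,
        Reporter reporter) throws IOException {
      // "shape" is an assumed field name in the UFO sighting schema.
      collector.collect(new Pair<Utf8, Long>(new Utf8(record.get("shape").toString()), 1L));
    }
  }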
Example #4
public class AvroBytesRecord {

  public static final String BYTES_FIELD = "b";
  private static final String SCHEMA_JSON =
      "{\"type\": \"record\", \"name\": \"Bytes\", "
          + "\"fields\": ["
          + "{\"name\":\""
          + BYTES_FIELD
          + "\", \"type\":\"bytes\"}]}";
  public static final Schema SCHEMA = Schema.parse(SCHEMA_JSON);

  public static GenericRecord toGenericRecord(byte[] bytes) {
    GenericRecord record = new GenericData.Record(SCHEMA);
    record.put(BYTES_FIELD, ByteBuffer.wrap(bytes));
    return record;
  }

  public static GenericRecord toGenericRecord(Writable writable) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dao = new DataOutputStream(baos);
    writable.write(dao);
    dao.close();
    return toGenericRecord(baos.toByteArray());
  }

  public static void fromGenericRecord(GenericRecord r, Writable w) throws IOException {
    ByteArrayInputStream bais = new ByteArrayInputStream(fromGenericRecord(r));
    DataInputStream dis = new DataInputStream(bais);
    w.readFields(dis);
  }

  public static byte[] fromGenericRecord(GenericRecord record) {
    return ((ByteBuffer) record.get(BYTES_FIELD)).array();
  }
}
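A usage sketch for the helper above, round-tripping a Hadoop Writable (assuming org.apache.hadoop.io.IntWritable). One caveat: fromGenericRecord relies on ByteBuffer.array(), so it assumes the buffer's backing array holds exactly the payload.

  public static void roundTripExample() throws IOException {
    IntWritable original = new IntWritable(42);
    GenericRecord record = AvroBytesRecord.toGenericRecord(original);

    IntWritable restored = new IntWritable();
    AvroBytesRecord.fromGenericRecord(record, restored);
    // restored.get() == 42
  }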
Example #5
  public static Schema parseSchema(String schemaString) {

    String completeSchema = resolveSchema(schemaString);
    Schema schema = Schema.parse(completeSchema);
    String name = schema.getFullName();
    schemas.put(name, schema);
    return schema;
  }
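Neither resolveSchema nor the schemas registry is shown in this snippet; presumably the registry is a plain map keyed by the schema's full name, along these lines (hypothetical reconstruction):

  // Assumed surrounding declarations; not part of the original snippet.
  private static final Map<String, Schema> schemas = new HashMap<String, Schema>();

  public static Schema getSchema(String fullName) {
    return schemas.get(fullName);
  }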
 @Override
 public void setConf(org.apache.hadoop.conf.Configuration conf) {
   if (conf == null) return; // Hadoop first calls this with a null configuration; ignore it
   String mos = conf.get(AvroJob.MAP_OUTPUT_SCHEMA);
   Schema schema = Schema.parse(mos);
   pair = new Pair<Object, Object>(schema);
   Schema keySchema = Pair.getKeySchema(schema);
   final List<Field> fields = keySchema.getFields();
   final GenericRecord key = new GenericData.Record(keySchema);
   projector = new Projector(key, fields);
 }
Example #7
  @Test
  public void testSchemolution() /* will not be televised */ throws AvroBaseException, IOException {
    testSaveJsonFormat();
    byte[] row = Bytes.toBytes("spullara");
    HTablePool pool = new HTablePool();
    HTableInterface userTable = pool.getTable(TABLE);
    try {
      Get get = new Get(row);
      Result userRow = userTable.get(get);
      byte[] schemaKey = userRow.getValue(COLUMN_FAMILY, Bytes.toBytes("s"));
      HTableInterface schemaTable = pool.getTable(SCHEMA_TABLE);
      Schema actual;
      try {
        Result schemaRow = schemaTable.get(new Get(schemaKey));
        actual =
            Schema.parse(
                Bytes.toString(schemaRow.getValue(Bytes.toBytes("avro"), Bytes.toBytes("s"))));
      } finally {
        pool.putTable(schemaTable);
      }
      JsonDecoder jd =
          new JsonDecoder(
              actual, Bytes.toString(userRow.getValue(COLUMN_FAMILY, Bytes.toBytes("d"))));

      // Read it as a slightly different schema lacking a field
      InputStream stream = getClass().getResourceAsStream("/User2.json");
      Schema expected = Schema.parse(stream);

      {
        SpecificDatumReader<User> sdr = new SpecificDatumReader<User>();
        sdr.setSchema(actual);
        sdr.setExpected(expected);
        User loaded = sdr.read(null, jd);
        assertEquals("Sam", loaded.firstName.toString());
        assertEquals(null, loaded.mobile);
      }
    } finally {
      pool.putTable(userTable);
    }
  }
Example #8
  /**
   * Create a new Event Reader
   *
   * @param in the stream to read the event log from
   * @throws IOException if the version line or schema header cannot be read
   */
  @SuppressWarnings("deprecation")
  public EventReader(DataInputStream in) throws IOException {
    this.in = in;
    this.version = in.readLine();

    if (!EventWriter.VERSION.equals(version)) {
      throw new IOException("Incompatible event log version: " + version);
    }

    this.schema = Schema.parse(in.readLine());
    this.reader = new SpecificDatumReader(schema);
    this.decoder = DecoderFactory.get().jsonDecoder(schema, in);
  }
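With the decoder positioned past the version and schema header lines, callers presumably pull events off the stream one at a time; a hedged sketch of such a read method:

  // Sketch: read the next event datum; the concrete record type depends on
  // the schema that EventWriter wrote.
  public Object getNext() throws IOException {
    return reader.read(null, decoder);
  }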
 @Override
 public void prepare(Map conf, TridentOperationContext context) {
   try {
     String path = (String) conf.get("DOCUMENT_PATH");
     schema = Schema.parse(PersistDocumentFunction.class.getResourceAsStream("/document.avsc"));
     File file = new File(path);
     DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
     dataFileWriter = new DataFileWriter<GenericRecord>(datumWriter);
     if (file.exists()) dataFileWriter.appendTo(file);
     else dataFileWriter.create(schema, file);
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
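Since prepare opens (or appends to) a DataFileWriter, the matching cleanup hook presumably closes it so buffered records reach disk; a sketch assuming Storm Trident's BaseOperation.cleanup:

 @Override
 public void cleanup() {
   try {
     dataFileWriter.close(); // flushes buffered records to the data file
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }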
Example #10
    private Schema getSchema(InputSplit split, JobConf job) {
      // Inside of a MR job, we can pull out the actual properties
      if (AvroSerdeUtils.insideMRJob(job)) {
        MapWork mapWork = Utilities.getMapWork(job);

        // Iterate over the Path -> Partition descriptions to find the partition
        // that matches our input split.
        for (Map.Entry<String, PartitionDesc> pathsAndParts :
            mapWork.getPathToPartitionInfo().entrySet()) {
          String partitionPath = pathsAndParts.getKey();
          if (pathIsInPartition(((FileSplit) split).getPath(), partitionPath)) {
            if (LOG.isInfoEnabled()) {
              LOG.info("Matching partition " + partitionPath + " with input split " + split);
            }

            Properties props = pathsAndParts.getValue().getProperties();
            if (props.containsKey(AvroSerdeUtils.SCHEMA_LITERAL)
                || props.containsKey(AvroSerdeUtils.SCHEMA_URL)) {
              try {
                return AvroSerdeUtils.determineSchemaOrThrowException(props);
              } catch (Exception e) {
                throw new RuntimeException("Avro serde exception", e);
              }
            } else {
              return null; // If it's not in this property, it won't be in any others
            }
          }
        }
        if (LOG.isInfoEnabled()) {
          LOG.info("Unable to match filesplit " + split + " with a partition.");
        }
      }

      // In "select * from table" situations (non-MR), we can add things to the job
      // It's safe to add this to the job since it's not *actually* a mapred job.
      // Here the global state is confined to just this process.
      String s = job.get(AvroSerdeUtils.AVRO_SERDE_SCHEMA);
      if (s != null) {
        LOG.info("Found the avro schema in the job: " + s);
        return Schema.parse(s);
      }
      // No more places to get the schema from. Give up.  May have to re-encode later.
      return null;
    }
@SuppressWarnings("all")
public class JhCounters extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"JhCounters\",\"namespace\":\"org.apache.hadoop.mapreduce.jobhistory\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"groups\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"JhCounterGroup\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"displayName\",\"type\":\"string\"},{\"name\":\"counts\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"JhCounter\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"displayName\",\"type\":\"string\"},{\"name\":\"value\",\"type\":\"long\"}]}}}]}}}]}");
  public org.apache.avro.util.Utf8 name;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup>
      groups;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }

  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return name;
      case 1:
        return groups;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }

  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        name = (org.apache.avro.util.Utf8) value$;
        break;
      case 1:
        groups =
            (org.apache.avro.generic.GenericArray<
                    org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup>)
                value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
Example #12
@SuppressWarnings("all")
public class StateResult extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"StateResult\",\"namespace\":\"com.borqs.information.rpc.service\",\"fields\":[{\"name\":\"mid\",\"type\":\"string\"},{\"name\":\"status\",\"type\":\"string\"}]}");
  public java.lang.CharSequence mid;
  public java.lang.CharSequence status;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }
  // Used by DatumWriter.  Applications should not call.
  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return mid;
      case 1:
        return status;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  // Used by DatumReader.  Applications should not call.
  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        mid = (java.lang.CharSequence) value$;
        break;
      case 1:
        status = (java.lang.CharSequence) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
Example #13
@SuppressWarnings("all")
public class TCell extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"TCell\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}");
  public java.nio.ByteBuffer value;
  public long timestamp;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }

  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return value;
      case 1:
        return timestamp;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }

  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        value = (java.nio.ByteBuffer) value$;
        break;
      case 1:
        timestamp = (java.lang.Long) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
Example #14
@SuppressWarnings("all")
public class ResponseError extends org.apache.avro.specific.SpecificExceptionBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"error\",\"name\":\"ResponseError\",\"namespace\":\"com.borqs.server.base\",\"fields\":[{\"name\":\"code\",\"type\":\"int\"},{\"name\":\"message\",\"type\":\"string\"}]}");
  public int code;
  public java.lang.CharSequence message;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }
  // Used by DatumWriter.  Applications should not call.
  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return code;
      case 1:
        return message;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  // Used by DatumReader.  Applications should not call.
  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        code = (java.lang.Integer) value$;
        break;
      case 1:
        message = (java.lang.CharSequence) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
 /** Initialize this sensor. */
 @Override
 public final void init() {
   schema = Schema.parse(getScheme());
   uri = EntityUriBuilder.nativeUri(schema.getNamespace(), schema.getName());
   LOG.debug("Sensor storing to URI: {}", uri);
 }
@SuppressWarnings("all")
public class TaskAttemptUnsuccessfulCompletion extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"TaskAttemptUnsuccessfulCompletion\",\"namespace\":\"org.apache.hadoop.mapreduce.jobhistory\",\"fields\":[{\"name\":\"taskid\",\"type\":\"string\"},{\"name\":\"taskType\",\"type\":\"string\"},{\"name\":\"attemptId\",\"type\":\"string\"},{\"name\":\"finishTime\",\"type\":\"long\"},{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"status\",\"type\":\"string\"},{\"name\":\"error\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 taskid;
  public org.apache.avro.util.Utf8 taskType;
  public org.apache.avro.util.Utf8 attemptId;
  public long finishTime;
  public org.apache.avro.util.Utf8 hostname;
  public org.apache.avro.util.Utf8 status;
  public org.apache.avro.util.Utf8 error;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }

  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return taskid;
      case 1:
        return taskType;
      case 2:
        return attemptId;
      case 3:
        return finishTime;
      case 4:
        return hostname;
      case 5:
        return status;
      case 6:
        return error;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }

  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        taskid = (org.apache.avro.util.Utf8) value$;
        break;
      case 1:
        taskType = (org.apache.avro.util.Utf8) value$;
        break;
      case 2:
        attemptId = (org.apache.avro.util.Utf8) value$;
        break;
      case 3:
        finishTime = (java.lang.Long) value$;
        break;
      case 4:
        hostname = (org.apache.avro.util.Utf8) value$;
        break;
      case 5:
        status = (org.apache.avro.util.Utf8) value$;
        break;
      case 6:
        error = (org.apache.avro.util.Utf8) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
Example #17
public class CassandraServer implements Cassandra {
  private static Logger logger = Logger.getLogger(CassandraServer.class);

  private static final GenericArray<Column> EMPTY_SUBCOLUMNS =
      new GenericData.Array<Column>(
          0, Schema.parse("{\"type\":\"array\",\"items\":" + Column.SCHEMA$ + "}"));
  private static final Utf8 API_VERSION = new Utf8("0.0.0");

  @Override
  public ColumnOrSuperColumn get(
      Utf8 keyspace, Utf8 key, ColumnPath columnPath, ConsistencyLevel consistencyLevel)
      throws AvroRemoteException, InvalidRequestException, NotFoundException, UnavailableException,
          TimedOutException {
    if (logger.isDebugEnabled()) logger.debug("get");

    ColumnOrSuperColumn column =
        multigetInternal(
                keyspace.toString(), Arrays.asList(key.toString()), columnPath, consistencyLevel)
            .get(key.toString());

    if ((column.column == null) && (column.super_column == null)) {
      throw newNotFoundException("Path not found");
    }
    return column;
  }

  private Map<String, ColumnOrSuperColumn> multigetInternal(
      String keyspace, List<String> keys, ColumnPath cp, ConsistencyLevel level)
      throws InvalidRequestException, UnavailableException, TimedOutException {
    AvroValidation.validateColumnPath(keyspace, cp);

    // FIXME: This is repetitive.
    byte[] column, super_column;
    column = cp.column == null ? null : cp.column.array();
    super_column = cp.super_column == null ? null : cp.super_column.array();

    QueryPath path =
        new QueryPath(cp.column_family.toString(), column == null ? null : super_column);
    List<byte[]> nameAsList = Arrays.asList(column == null ? super_column : column);
    List<ReadCommand> commands = new ArrayList<ReadCommand>();
    for (String key : keys) {
      AvroValidation.validateKey(key);
      commands.add(new SliceByNamesReadCommand(keyspace, key, path, nameAsList));
    }

    Map<String, ColumnOrSuperColumn> columnFamiliesMap = new HashMap<String, ColumnOrSuperColumn>();
    Map<String, Collection<IColumn>> columnsMap = multigetColumns(commands, level);

    for (ReadCommand command : commands) {
      ColumnOrSuperColumn columnorsupercolumn;

      Collection<IColumn> columns = columnsMap.get(command.key);
      if (columns == null) {
        columnorsupercolumn = new ColumnOrSuperColumn();
      } else {
        assert columns.size() == 1;
        IColumn col = columns.iterator().next();

        if (col.isMarkedForDelete()) {
          columnorsupercolumn = new ColumnOrSuperColumn();
        } else {
          columnorsupercolumn =
              col instanceof org.apache.cassandra.db.Column
                  ? newColumnOrSuperColumn(newColumn(col.name(), col.value(), col.timestamp()))
                  : newColumnOrSuperColumn(
                      newSuperColumn(col.name(), avronateSubColumns(col.getSubColumns())));
        }
      }
      columnFamiliesMap.put(command.key, columnorsupercolumn);
    }

    return columnFamiliesMap;
  }

  private Map<String, Collection<IColumn>> multigetColumns(
      List<ReadCommand> commands, ConsistencyLevel level)
      throws InvalidRequestException, UnavailableException, TimedOutException {
    Map<String, ColumnFamily> cfamilies = readColumnFamily(commands, level);
    Map<String, Collection<IColumn>> columnFamiliesMap = new HashMap<String, Collection<IColumn>>();

    for (ReadCommand command : commands) {
      ColumnFamily cfamily = cfamilies.get(command.key);
      if (cfamily == null) continue;

      Collection<IColumn> columns = null;
      if (command.queryPath.superColumnName != null) {
        IColumn column = cfamily.getColumn(command.queryPath.superColumnName);
        if (column != null) {
          columns = column.getSubColumns();
        }
      } else {
        columns = cfamily.getSortedColumns();
      }

      if (columns != null && columns.size() != 0) {
        columnFamiliesMap.put(command.key, columns);
      }
    }

    return columnFamiliesMap;
  }

  protected Map<String, ColumnFamily> readColumnFamily(
      List<ReadCommand> commands, ConsistencyLevel consistency)
      throws InvalidRequestException, UnavailableException, TimedOutException {
    // TODO - Support multiple column families per row, right now row only contains 1 column family
    Map<String, ColumnFamily> columnFamilyKeyMap = new HashMap<String, ColumnFamily>();

    if (consistency == ConsistencyLevel.ZERO)
      throw newInvalidRequestException(
          "Consistency level zero may not be applied to read operations");

    if (consistency == ConsistencyLevel.ALL)
      throw newInvalidRequestException(
          "Consistency level all is not yet supported on read operations");

    List<Row> rows;
    try {
      rows = StorageProxy.readProtocol(commands, thriftConsistencyLevel(consistency));
    } catch (TimeoutException e) {
      throw new TimedOutException();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    // FIXME: This suckage brought to you by StorageService and StorageProxy
    // which throw Thrift exceptions directly.
    catch (org.apache.cassandra.thrift.UnavailableException e) {
      throw new UnavailableException();
    }

    for (Row row : rows) {
      columnFamilyKeyMap.put(row.key, row.cf);
    }

    return columnFamilyKeyMap;
  }

  // Don't playa hate, avronate.
  public GenericArray<Column> avronateSubColumns(Collection<IColumn> columns) {
    if (columns == null || columns.isEmpty()) return EMPTY_SUBCOLUMNS;

    GenericData.Array<Column> avroColumns =
        new GenericData.Array<Column>(columns.size(), Column.SCHEMA$);

    for (IColumn column : columns) {
      if (column.isMarkedForDelete()) continue;

      Column avroColumn = newColumn(column.name(), column.value(), column.timestamp());
      avroColumns.add(avroColumn);
    }

    return avroColumns;
  }

  @Override
  public Void insert(
      Utf8 keyspace,
      Utf8 key,
      ColumnPath cp,
      ByteBuffer value,
      long timestamp,
      ConsistencyLevel consistencyLevel)
      throws AvroRemoteException, InvalidRequestException, UnavailableException, TimedOutException {
    if (logger.isDebugEnabled()) logger.debug("insert");

    // FIXME: This is repetitive.
    byte[] column, super_column;
    column = cp.column == null ? null : cp.column.array();
    super_column = cp.super_column == null ? null : cp.super_column.array();
    String column_family = cp.column_family.toString();
    String keyspace_string = keyspace.toString();

    AvroValidation.validateKey(keyspace_string);
    AvroValidation.validateColumnPath(keyspace_string, cp);

    RowMutation rm = new RowMutation(keyspace_string, key.toString());
    try {
      rm.add(new QueryPath(column_family, super_column, column), value.array(), timestamp);
    } catch (MarshalException e) {
      throw newInvalidRequestException(e.getMessage());
    }
    doInsert(consistencyLevel, rm);

    return null;
  }

  private void doInsert(ConsistencyLevel consistency, RowMutation rm)
      throws UnavailableException, TimedOutException {
    if (consistency != ConsistencyLevel.ZERO) {
      try {
        StorageProxy.mutateBlocking(Arrays.asList(rm), thriftConsistencyLevel(consistency));
      } catch (TimeoutException e) {
        throw new TimedOutException();
      } catch (org.apache.cassandra.thrift.UnavailableException thriftE) {
        throw new UnavailableException();
      }
    } else {
      StorageProxy.mutate(Arrays.asList(rm));
    }
  }

  @Override
  public Void batch_insert(
      Utf8 keyspace,
      Utf8 key,
      Map<Utf8, GenericArray<ColumnOrSuperColumn>> cfmap,
      ConsistencyLevel consistency)
      throws AvroRemoteException, InvalidRequestException, UnavailableException, TimedOutException {
    if (logger.isDebugEnabled()) logger.debug("batch_insert");

    String keyString = key.toString();
    String keyspaceString = keyspace.toString();

    AvroValidation.validateKey(keyString);

    for (Utf8 cfName : cfmap.keySet()) {
      for (ColumnOrSuperColumn cosc : cfmap.get(cfName))
        AvroValidation.validateColumnOrSuperColumn(keyspaceString, cfName.toString(), cosc);
    }

    doInsert(consistency, getRowMutation(keyspaceString, keyString, cfmap));
    return null;
  }

  // FIXME: This is copypasta from o.a.c.db.RowMutation, (RowMutation.getRowMutation uses Thrift
  // types directly).
  private static RowMutation getRowMutation(
      String keyspace, String key, Map<Utf8, GenericArray<ColumnOrSuperColumn>> cfmap) {
    RowMutation rm = new RowMutation(keyspace, key.trim());
    for (Map.Entry<Utf8, GenericArray<ColumnOrSuperColumn>> entry : cfmap.entrySet()) {
      String cfName = entry.getKey().toString();
      for (ColumnOrSuperColumn cosc : entry.getValue()) {
        if (cosc.column == null) {
          assert cosc.super_column != null;
          for (Column column : cosc.super_column.columns) {
            QueryPath path =
                new QueryPath(cfName, cosc.super_column.name.array(), column.name.array());
            rm.add(path, column.value.array(), column.timestamp);
          }
        } else {
          assert cosc.super_column == null;
          QueryPath path = new QueryPath(cfName, null, cosc.column.name.array());
          rm.add(path, cosc.column.value.array(), cosc.column.timestamp);
        }
      }
    }
    return rm;
  }

  private org.apache.cassandra.thrift.ConsistencyLevel thriftConsistencyLevel(
      ConsistencyLevel consistency) {
    switch (consistency) {
      case ZERO:
        return org.apache.cassandra.thrift.ConsistencyLevel.ZERO;
      case ONE:
        return org.apache.cassandra.thrift.ConsistencyLevel.ONE;
      case QUORUM:
        return org.apache.cassandra.thrift.ConsistencyLevel.QUORUM;
      case DCQUORUM:
        return org.apache.cassandra.thrift.ConsistencyLevel.DCQUORUM;
      case DCQUORUMSYNC:
        return org.apache.cassandra.thrift.ConsistencyLevel.DCQUORUMSYNC;
      case ALL:
        return org.apache.cassandra.thrift.ConsistencyLevel.ALL;
    }
    return null;
  }

  @Override
  public Utf8 get_api_version() throws AvroRemoteException {
    return API_VERSION;
  }
}
 @SuppressWarnings("deprecation")
 @Override
 public void setConf(Configuration conf) {
   schema = Schema.parse(conf.get(GROUP_SCHEMA));
 }
Example #19
@SuppressWarnings("all")
public class ProtocolStatus extends PersistentBase {
  public static final Schema _SCHEMA =
      Schema.parse(
          "{\"type\":\"record\",\"name\":\"ProtocolStatus\",\"namespace\":\"org.apache.nutch.storage\",\"fields\":[{\"name\":\"code\",\"type\":\"int\"},{\"name\":\"args\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"lastModified\",\"type\":\"long\"}]}");

  public static enum Field {
    CODE(0, "code"),
    ARGS(1, "args"),
    LAST_MODIFIED(2, "lastModified"),
    ;
    private int index;
    private String name;

    Field(int index, String name) {
      this.index = index;
      this.name = name;
    }

    public int getIndex() {
      return index;
    }

    public String getName() {
      return name;
    }

    public String toString() {
      return name;
    }
  };

  public static final String[] _ALL_FIELDS = {
    "code", "args", "lastModified",
  };

  static {
    PersistentBase.registerFields(ProtocolStatus.class, _ALL_FIELDS);
  }

  private int code;
  private GenericArray<Utf8> args;
  private long lastModified;

  public ProtocolStatus() {
    this(new StateManagerImpl());
  }

  public ProtocolStatus(StateManager stateManager) {
    super(stateManager);
    args = new ListGenericArray<Utf8>(getSchema().getField("args").schema());
  }

  public ProtocolStatus newInstance(StateManager stateManager) {
    return new ProtocolStatus(stateManager);
  }

  public Schema getSchema() {
    return _SCHEMA;
  }

  public Object get(int _field) {
    switch (_field) {
      case 0:
        return code;
      case 1:
        return args;
      case 2:
        return lastModified;
      default:
        throw new AvroRuntimeException("Bad index");
    }
  }

  @SuppressWarnings(value = "unchecked")
  public void put(int _field, Object _value) {
    if (isFieldEqual(_field, _value)) return;
    getStateManager().setDirty(this, _field);
    switch (_field) {
      case 0:
        code = (Integer) _value;
        break;
      case 1:
        args = (GenericArray<Utf8>) _value;
        break;
      case 2:
        lastModified = (Long) _value;
        break;
      default:
        throw new AvroRuntimeException("Bad index");
    }
  }

  public int getCode() {
    return (Integer) get(0);
  }

  public void setCode(int value) {
    put(0, value);
  }

  public GenericArray<Utf8> getArgs() {
    return (GenericArray<Utf8>) get(1);
  }

  public void addToArgs(Utf8 element) {
    getStateManager().setDirty(this, 1);
    args.add(element);
  }

  public long getLastModified() {
    return (Long) get(2);
  }

  public void setLastModified(long value) {
    put(2, value);
  }
}
Example #20
@SuppressWarnings("all")
/** Kmer count Structure */
public class kcount extends org.apache.avro.specific.SpecificRecordBase
    implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
      org.apache.avro.Schema.parse(
          "{\"type\":\"record\",\"name\":\"kcount\",\"namespace\":\"org.avrotest\",\"doc\":\"Kmer count Structure\",\"fields\":[{\"name\":\"kmer\",\"type\":\"string\"},{\"name\":\"count\",\"type\":\"long\"}]}");
  @Deprecated public java.lang.CharSequence kmer;
  @Deprecated public long count;

  public org.apache.avro.Schema getSchema() {
    return SCHEMA$;
  }
  // Used by DatumWriter.  Applications should not call.
  public java.lang.Object get(int field$) {
    switch (field$) {
      case 0:
        return kmer;
      case 1:
        return count;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  // Used by DatumReader.  Applications should not call.
  @SuppressWarnings(value = "unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
      case 0:
        kmer = (java.lang.CharSequence) value$;
        break;
      case 1:
        count = (java.lang.Long) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }

  /** Gets the value of the 'kmer' field. */
  public java.lang.CharSequence getKmer() {
    return kmer;
  }

  /**
   * Sets the value of the 'kmer' field.
   *
   * @param value the value to set.
   */
  public void setKmer(java.lang.CharSequence value) {
    this.kmer = value;
  }

  /** Gets the value of the 'count' field. */
  public java.lang.Long getCount() {
    return count;
  }

  /**
   * Sets the value of the 'count' field.
   *
   * @param value the value to set.
   */
  public void setCount(java.lang.Long value) {
    this.count = value;
  }

  /** Creates a new kcount RecordBuilder */
  public static org.avrotest.kcount.Builder newBuilder() {
    return new org.avrotest.kcount.Builder();
  }

  /** Creates a new kcount RecordBuilder by copying an existing Builder */
  public static org.avrotest.kcount.Builder newBuilder(org.avrotest.kcount.Builder other) {
    return new org.avrotest.kcount.Builder(other);
  }

  /** Creates a new kcount RecordBuilder by copying an existing kcount instance */
  public static org.avrotest.kcount.Builder newBuilder(org.avrotest.kcount other) {
    return new org.avrotest.kcount.Builder(other);
  }

  /** RecordBuilder for kcount instances. */
  public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<kcount>
      implements org.apache.avro.data.RecordBuilder<kcount> {

    private java.lang.CharSequence kmer;
    private long count;

    /** Creates a new Builder */
    private Builder() {
      super(org.avrotest.kcount.SCHEMA$);
    }

    /** Creates a Builder by copying an existing Builder */
    private Builder(org.avrotest.kcount.Builder other) {
      super(other);
    }

    /** Creates a Builder by copying an existing kcount instance */
    private Builder(org.avrotest.kcount other) {
      super(org.avrotest.kcount.SCHEMA$);
      if (isValidValue(fields()[0], other.kmer)) {
        this.kmer = (java.lang.CharSequence) data().deepCopy(fields()[0].schema(), other.kmer);
        fieldSetFlags()[0] = true;
      }
      if (isValidValue(fields()[1], other.count)) {
        this.count = (java.lang.Long) data().deepCopy(fields()[1].schema(), other.count);
        fieldSetFlags()[1] = true;
      }
    }

    /** Gets the value of the 'kmer' field */
    public java.lang.CharSequence getKmer() {
      return kmer;
    }

    /** Sets the value of the 'kmer' field */
    public org.avrotest.kcount.Builder setKmer(java.lang.CharSequence value) {
      validate(fields()[0], value);
      this.kmer = value;
      fieldSetFlags()[0] = true;
      return this;
    }

    /** Checks whether the 'kmer' field has been set */
    public boolean hasKmer() {
      return fieldSetFlags()[0];
    }

    /** Clears the value of the 'kmer' field */
    public org.avrotest.kcount.Builder clearKmer() {
      kmer = null;
      fieldSetFlags()[0] = false;
      return this;
    }

    /** Gets the value of the 'count' field */
    public java.lang.Long getCount() {
      return count;
    }

    /** Sets the value of the 'count' field */
    public org.avrotest.kcount.Builder setCount(long value) {
      validate(fields()[1], value);
      this.count = value;
      fieldSetFlags()[1] = true;
      return this;
    }

    /** Checks whether the 'count' field has been set */
    public boolean hasCount() {
      return fieldSetFlags()[1];
    }

    /** Clears the value of the 'count' field */
    public org.avrotest.kcount.Builder clearCount() {
      fieldSetFlags()[1] = false;
      return this;
    }

    @Override
    public kcount build() {
      try {
        kcount record = new kcount();
        record.kmer =
            fieldSetFlags()[0] ? this.kmer : (java.lang.CharSequence) defaultValue(fields()[0]);
        record.count = fieldSetFlags()[1] ? this.count : (java.lang.Long) defaultValue(fields()[1]);
        return record;
      } catch (Exception e) {
        throw new org.apache.avro.AvroRuntimeException(e);
      }
    }
  }
}
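The generated builder is used in the usual fluent style; for example (values are hypothetical):

kcount record = kcount.newBuilder()
    .setKmer("ACGT")
    .setCount(12L)
    .build();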
Example #21
 @Override
 public Schema getSchema() {
   return Schema.parse(
       "{\"type\":\"record\",\"name\":\"MockPersistent\",\"namespace\":\"org.apache.gora.mock.persistency\",\"fields\":[{\"name\":\"foo\",\"type\":\"int\"},{\"name\":\"baz\",\"type\":\"int\"}]}");
 }
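Note that this re-parses the schema JSON on every getSchema() call; the generated classes elsewhere on this page instead parse once into a static final field, which the same method could reuse:

 // Sketch: parse once and reuse, mirroring the SCHEMA$ convention used above.
 private static final Schema SCHEMA =
     Schema.parse(
         "{\"type\":\"record\",\"name\":\"MockPersistent\",\"namespace\":\"org.apache.gora.mock.persistency\",\"fields\":[{\"name\":\"foo\",\"type\":\"int\"},{\"name\":\"baz\",\"type\":\"int\"}]}");

 @Override
 public Schema getSchema() {
   return SCHEMA;
 }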
  // Verify if the new avro schema being pushed is the same one as the old one
  // Does not have logic to check for Avro schema evolution yet
  public void verifyAvroSchema(String url) throws Exception {
    // create new n store def with schema from the metadata in the input
    // path
    Schema schema = AvroUtils.getAvroSchemaFromPath(getInputPath());
    int replicationFactor = props.getInt("build.replication.factor", 2);
    int requiredReads = props.getInt("build.required.reads", 1);
    int requiredWrites = props.getInt("build.required.writes", 1);
    String description = props.getString("push.store.description", "");
    String owners = props.getString("push.store.owners", "");

    String keySchema =
        "\n\t\t<type>avro-generic</type>\n\t\t<schema-info version=\"0\">"
            + schema.getField(keyField).schema()
            + "</schema-info>\n\t";
    String valSchema =
        "\n\t\t<type>avro-generic</type>\n\t\t<schema-info version=\"0\">"
            + schema.getField(valueField).schema()
            + "</schema-info>\n\t";

    boolean hasCompression = false;
    if (props.containsKey("build.compress.value")) hasCompression = true;

    if (hasCompression) {
      valSchema += "\t<compression><type>gzip</type></compression>\n\t";
    }

    if (props.containsKey("build.force.schema.key")) {
      keySchema = props.get("build.force.schema.key");
    }

    if (props.containsKey("build.force.schema.value")) {
      valSchema = props.get("build.force.schema.value");
    }

    String newStoreDefXml =
        VoldemortUtils.getStoreDefXml(
            storeName,
            replicationFactor,
            requiredReads,
            requiredWrites,
            props.containsKey("build.preferred.reads")
                ? props.getInt("build.preferred.reads")
                : null,
            props.containsKey("build.preferred.writes")
                ? props.getInt("build.preferred.writes")
                : null,
            (props.containsKey("push.force.schema.key"))
                ? props.getString("push.force.schema.key")
                : keySchema,
            (props.containsKey("push.force.schema.value"))
                ? props.getString("push.force.schema.value")
                : valSchema,
            description,
            owners);

    log.info("Verifying store: \n" + newStoreDefXml.toString());

    StoreDefinition newStoreDef = VoldemortUtils.getStoreDef(newStoreDefXml);

    // get store def from cluster
    log.info("Getting store definition from: " + url + " (node id " + this.nodeId + ")");

    AdminClient adminClient = new AdminClient(url, new AdminClientConfig());
    try {
      List<StoreDefinition> remoteStoreDefs =
          adminClient.getRemoteStoreDefList(this.nodeId).getValue();
      boolean foundStore = false;

      // Go over all store defs and see if one has the same name as the store
      // we're trying to build.
      for (StoreDefinition remoteStoreDef : remoteStoreDefs) {
        if (remoteStoreDef.getName().equals(storeName)) {
          // If the store already exists, but doesn't match what we want to
          // push, we need to worry.
          if (!remoteStoreDef.equals(newStoreDef)) {

            // Let's check to see if the key/value serializers are REALLY equal.
            SerializerDefinition localKeySerializerDef = newStoreDef.getKeySerializer();
            SerializerDefinition localValueSerializerDef = newStoreDef.getValueSerializer();
            SerializerDefinition remoteKeySerializerDef = remoteStoreDef.getKeySerializer();
            SerializerDefinition remoteValueSerializerDef = remoteStoreDef.getValueSerializer();

            if (remoteKeySerializerDef.getName().equals("avro-generic")
                && remoteValueSerializerDef.getName().equals("avro-generic")
                && remoteKeySerializerDef.getAllSchemaInfoVersions().size() == 1
                && remoteValueSerializerDef.getAllSchemaInfoVersions().size() == 1) {
              Schema remoteKeyDef = Schema.parse(remoteKeySerializerDef.getCurrentSchemaInfo());
              Schema remoteValDef = Schema.parse(remoteValueSerializerDef.getCurrentSchemaInfo());
              Schema localKeyDef = Schema.parse(localKeySerializerDef.getCurrentSchemaInfo());
              Schema localValDef = Schema.parse(localValueSerializerDef.getCurrentSchemaInfo());

              if (remoteKeyDef.equals(localKeyDef) && remoteValDef.equals(localValDef)) {
                String compressionPolicy = "";
                if (hasCompression) {
                  compressionPolicy = "\n\t\t<compression><type>gzip</type></compression>";
                }

                // If the key/value serializers are REALLY equal (even though
                // the strings may not match), then just use the remote stores
                // to GUARANTEE that they match, and try again.
                newStoreDefXml =
                    VoldemortUtils.getStoreDefXml(
                        storeName,
                        replicationFactor,
                        requiredReads,
                        requiredWrites,
                        props.containsKey("build.preferred.reads")
                            ? props.getInt("build.preferred.reads")
                            : null,
                        props.containsKey("build.preferred.writes")
                            ? props.getInt("build.preferred.writes")
                            : null,
                        "\n\t\t<type>avro-generic</type>\n\t\t<schema-info version=\"0\">"
                            + remoteKeySerializerDef.getCurrentSchemaInfo()
                            + "</schema-info>\n\t",
                        "\n\t\t<type>avro-generic</type>\n\t\t<schema-info version=\"0\">"
                            + remoteValueSerializerDef.getCurrentSchemaInfo()
                            + "</schema-info>"
                            + compressionPolicy
                            + "\n\t");

                newStoreDef = VoldemortUtils.getStoreDef(newStoreDefXml);

                if (!remoteStoreDef.equals(newStoreDef)) {
                  // If we still get a fail, then we know that the store defs
                  // don't match for reasons OTHER than the key/value serializer.
                  throw new RuntimeException(
                      "Your store schema is identical, but the store definition does not match. Have: "
                          + newStoreDef
                          + "\nBut expected: "
                          + remoteStoreDef);
                }
              } else {
                // If the key/value serializers are not equal (even in java,
                // not just json strings), then fail.
                throw new RuntimeException(
                    "Your store definition does not match the store definition that is already in the cluster. Tried to resolve identical schemas between local and remote, but failed. Have: "
                        + newStoreDef
                        + "\nBut expected: "
                        + remoteStoreDef);
              }
            }
          }

          foundStore = true;
          break;
        }
      }

      // if the store doesn't exist yet, create it
      if (!foundStore) {
        // New requirement - make sure the user specified description and owners.
        if (description.length() == 0) {
          throw new RuntimeException(
              "Description field missing in store definition. "
                  + "Please add \"push.store.description\" with a line describing your store");
        }

        if (owners.length() == 0) {
          throw new RuntimeException(
              "Owner field missing in store definition. "
                  + "Please add \"push.store.owners\" with value being comma-separated list of LinkedIn email ids");
        }

        log.info("Could not find store " + storeName + " on Voldemort. Adding it to all nodes ");
        adminClient.addStore(newStoreDef);
      }

      storeDefs =
          ImmutableList.of(
              VoldemortUtils.getStoreDef(
                  VoldemortUtils.getStoreDefXml(
                      storeName,
                      replicationFactor,
                      requiredReads,
                      requiredWrites,
                      props.containsKey("build.preferred.reads")
                          ? props.getInt("build.preferred.reads")
                          : null,
                      props.containsKey("build.preferred.writes")
                          ? props.getInt("build.preferred.writes")
                          : null,
                      keySchema,
                      valSchema)));
      cluster = adminClient.getAdminClientCluster();
    } finally {
      adminClient.stop();
    }
  }