Code example #1
 public BoxJSONParser(final IBoxResourceHub hub) {
   mObjectMapper = new ObjectMapper();
   // Omit null fields when writing; tolerate null primitives and unknown fields when reading.
   mObjectMapper.setSerializationInclusion(Include.NON_NULL);
   mObjectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false);
   mObjectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
   // Bind each resource type's string name to its concrete class for polymorphic deserialization.
   for (IBoxType type : hub.getAllTypes()) {
     mObjectMapper.registerSubtypes(new NamedType(hub.getClass(type), type.toString()));
   }
 }
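Every example on this page uses Jackson's ObjectMapper.registerSubtypes to bind a concrete class to a logical type name so that polymorphic JSON can be deserialized back into the right subtype. A minimal, self-contained sketch of the pattern follows; the Animal/Dog types and the "type" property are hypothetical stand-ins, not taken from any of the projects shown here.

 import com.fasterxml.jackson.annotation.JsonTypeInfo;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.jsontype.NamedType;

 public class SubtypeDemo {
   // The base type declares that a "type" property carries the logical subtype name.
   @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
   public interface Animal {}

   public static class Dog implements Animal {
     public String name;
   }

   public static void main(String[] args) throws Exception {
     ObjectMapper mapper = new ObjectMapper();
     // Bind the name "dog" to the concrete class, as BoxJSONParser does for each IBoxType.
     mapper.registerSubtypes(new NamedType(Dog.class, "dog"));
     Animal a = mapper.readValue("{\"type\":\"dog\",\"name\":\"Rex\"}", Animal.class);
     System.out.println(a.getClass().getSimpleName()); // prints: Dog
   }
 }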
Code example #2
 @Before
 public void setUp() {
   mapper.registerSubtypes(MappableObject.class);
 }
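When a class is registered without an explicit NamedType, as above, Jackson takes the logical name from the class's @JsonTypeName annotation, falling back to the simple class name; presumably MappableObject carries such an annotation in this test fixture.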
Code example #3
File: WorkerNode.java  Project: Web5design/druid
 private void initializeJacksonSubtypes() {
   jsonMapper.registerSubtypes(StaticS3FirehoseFactory.class);
 }
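This Druid worker node registers StaticS3FirehoseFactory at startup so that task specs referencing the S3 firehose by its logical type name can be deserialized into the concrete factory class.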
Code example #4
  @Before
  public void setUp() throws Exception {
    emitter = EasyMock.createMock(ServiceEmitter.class);
    EmittingLogger.registerEmitter(emitter);
    queryRunnerFactoryConglomerate =
        EasyMock.createStrictMock(QueryRunnerFactoryConglomerate.class);
    monitorScheduler = EasyMock.createStrictMock(MonitorScheduler.class);
    publishCountDown = new CountDownLatch(1);
    announcedSinks = 0;
    pushedSegments = 0;
    tmpDir = temporaryFolder.newFolder();
    TestUtils testUtils = new TestUtils();
    mapper = testUtils.getTestObjectMapper();

    tqc =
        mapper.readValue(
            "{\"startDelay\":\"PT0S\", \"restartDelay\":\"PT1S\", \"storageSyncRate\":\"PT0.5S\"}",
            TaskQueueConfig.class);
    indexSpec = new IndexSpec();

    if (taskStorageType.equals("HeapMemoryTaskStorage")) {
      ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {});
    } else if (taskStorageType.equals("MetadataTaskStorage")) {
      testDerbyConnector = derbyConnectorRule.getConnector();
      // Register the mock firehose factories under the logical names used in task JSON.
      mapper.registerSubtypes(
          new NamedType(MockExceptionalFirehoseFactory.class, "mockExcepFirehoseFactory"),
          new NamedType(MockFirehoseFactory.class, "mockFirehoseFactory"));
      testDerbyConnector.createTaskTables();
      testDerbyConnector.createSegmentTable();
      ts =
          new MetadataTaskStorage(
              testDerbyConnector,
              new TaskStorageConfig(null),
              new SQLMetadataStorageActionHandlerFactory(
                  testDerbyConnector,
                  derbyConnectorRule.metadataTablesConfigSupplier().get(),
                  mapper));
    } else {
      throw new RuntimeException(String.format("Unknown task storage type [%s]", taskStorageType));
    }

    serverView =
        new FilteredServerView() {
          @Override
          public void registerSegmentCallback(
              Executor exec, ServerView.SegmentCallback callback, Predicate<DataSegment> filter) {
            segmentCallbacks.add(callback);
          }
        };
    setUpAndStartTaskQueue(
        new DataSegmentPusher() {
          @Override
          public String getPathForHadoop(String dataSource) {
            throw new UnsupportedOperationException();
          }

          @Override
          public DataSegment push(File file, DataSegment segment) throws IOException {
            pushedSegments++;
            return segment;
          }
        });
  }
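Note that the mock firehose factories are registered only on the MetadataTaskStorage path: with a metadata store, task definitions are persisted as JSON and read back, so the mapper must be able to resolve the names "mockExcepFirehoseFactory" and "mockFirehoseFactory" to the mock classes. The in-heap storage keeps live objects and presumably never needs to deserialize them.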
Code example #5
File: AppenderatorTester.java  Project: RaviNK/druid
  public AppenderatorTester(final int maxRowsInMemory, final File basePersistDirectory) {
    objectMapper = new DefaultObjectMapper();
    // ShardSpec is polymorphic; register the implementation this test's segments use.
    objectMapper.registerSubtypes(LinearShardSpec.class);

    final Map<String, Object> parserMap =
        objectMapper.convertValue(
            new MapInputRowParser(
                new JSONParseSpec(
                    new TimestampSpec("ts", "auto", null), new DimensionsSpec(null, null, null))),
            Map.class);
    schema =
        new DataSchema(
            DATASOURCE,
            parserMap,
            new AggregatorFactory[] {
              new CountAggregatorFactory("count"), new LongSumAggregatorFactory("met", "met")
            },
            new UniformGranularitySpec(Granularity.MINUTE, QueryGranularities.NONE, null),
            objectMapper);

    tuningConfig =
        new RealtimeTuningConfig(
            maxRowsInMemory,
            null,
            null,
            basePersistDirectory,
            null,
            null,
            null,
            null,
            null,
            null,
            0,
            0,
            null,
            null);

    metrics = new FireDepartmentMetrics();
    queryExecutor = Execs.singleThreaded("queryExecutor(%d)");

    indexIO =
        new IndexIO(
            objectMapper,
            new ColumnConfig() {
              @Override
              public int columnCacheSizeBytes() {
                return 0;
              }
            });
    indexMerger = new IndexMerger(objectMapper, indexIO);

    emitter =
        new ServiceEmitter(
            "test",
            "test",
            new LoggingEmitter(
                new Logger(AppenderatorTester.class), LoggingEmitter.Level.INFO, objectMapper));
    emitter.start();
    EmittingLogger.registerEmitter(emitter);
    dataSegmentPusher =
        new DataSegmentPusher() {
          @Deprecated
          @Override
          public String getPathForHadoop(String dataSource) {
            return getPathForHadoop();
          }

          @Override
          public String getPathForHadoop() {
            throw new UnsupportedOperationException();
          }

          @Override
          public DataSegment push(File file, DataSegment segment) throws IOException {
            pushedSegments.add(segment);
            return segment;
          }
        };
    appenderator =
        Appenderators.createRealtime(
            schema,
            tuningConfig,
            metrics,
            dataSegmentPusher,
            objectMapper,
            indexIO,
            indexMerger,
            new DefaultQueryRunnerFactoryConglomerate(
                ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
                    TimeseriesQuery.class,
                    new TimeseriesQueryRunnerFactory(
                        new TimeseriesQueryQueryToolChest(
                            new IntervalChunkingQueryRunnerDecorator(
                                queryExecutor, QueryRunnerTestHelper.NOOP_QUERYWATCHER, emitter)),
                        new TimeseriesQueryEngine(),
                        QueryRunnerTestHelper.NOOP_QUERYWATCHER))),
            new DataSegmentAnnouncer() {
              @Override
              public void announceSegment(DataSegment segment) throws IOException {}

              @Override
              public void unannounceSegment(DataSegment segment) throws IOException {}

              @Override
              public void announceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public boolean isAnnounced(DataSegment segment) {
                return false;
              }
            },
            emitter,
            queryExecutor,
            MapCache.create(2048),
            new CacheConfig());
  }
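LinearShardSpec is registered up front because ShardSpec is a polymorphic type in Druid: segments produced by the appenderator carry a shard spec, and the registration presumably lets this same mapper round-trip them by logical name when they are persisted and pushed.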
Code example #6
  @Before
  public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();

    cf =
        CuratorFrameworkFactory.builder()
            .connectString(testingCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    cf.start();
    cf.create().creatingParentsIfNeeded().forPath(basePath);

    worker = new Worker("worker", "localhost", 3, "0");

    workerCuratorCoordinator =
        new WorkerCuratorCoordinator(
            jsonMapper,
            new IndexerZkConfig(
                new ZkPathsConfig() {
                  @Override
                  public String getBase() {
                    return basePath;
                  }
                },
                null,
                null,
                null,
                null,
                null),
            new TestRemoteTaskRunnerConfig(new Period("PT1S")),
            cf,
            worker);
    workerCuratorCoordinator.start();

    // Start a task monitor
    workerTaskMonitor = createTaskMonitor();
    // Bind the test task classes to logical names so serialized tasks can be deserialized by name.
    jsonMapper.registerSubtypes(new NamedType(TestMergeTask.class, "test"));
    jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"));
    workerTaskMonitor.start();

    task = TestMergeTask.createDummyTask("test");
  }
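The two NamedType calls bind the test task classes to the names "test" and "test_realtime". The worker task monitor presumably receives task payloads as JSON (here via ZooKeeper paths under basePath), so jsonMapper needs these bindings to reconstruct TestMergeTask and TestRealtimeTask instances; note that the same jsonMapper instance is shared with the WorkerCuratorCoordinator created above.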