Example #1
 @BeforeMethod
 public void setup() throws Exception {
   IDBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime());
   dummyHandle = dbi.open();
   dataDir = Files.createTempDir();
   shardManager = new DatabaseShardManager(dbi);
 }
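A matching teardown keeps per-test state from leaking: closing the last open handle shuts down the in-memory H2 database, and the temporary directory must be removed. The following is a minimal sketch, not part of the original example, assuming the dummyHandle and dataDir fields from the setup above.

 @AfterMethod(alwaysRun = true)
 public void tearDown() throws Exception {
   // Closing the last open handle shuts down the per-test in-memory H2 database.
   if (dummyHandle != null) {
     dummyHandle.close();
   }
   // Recursively delete the temporary data directory created in setup().
   com.google.common.io.MoreFiles.deleteRecursively(
       dataDir.toPath(), com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE);
 }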
Example #2
 @BeforeMethod
 public void setup() throws Exception {
   dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime());
   dummyHandle = dbi.open();
   dao = dbi.onDemand(ShardManagerDao.class);
   createTablesWithRetry(dbi);
 }
Example #3
  @Override
  public Long insert(final Subscription subscription) {
    Long result =
        dbi.withHandle(
            new HandleCallback<Long>() {
              @Override
              public Long withHandle(Handle handle) throws Exception {
                return handle
                    .createStatement(
                        "insert into subscriptions (topic, metadata, channel) values (:topic, :metadata, :channel)")
                    .bind("topic", subscription.getTopic())
                    .bind("metadata", mapper.writeValueAsString(subscription.getMetadata()))
                    .bind("channel", subscription.getChannel())
                    .executeAndReturnGeneratedKeys(LongMapper.FIRST)
                    .first();
              }
            });

    if (!Strings.isNullOrEmpty(subscription.getMetadata().getFeed())) {
      subscriptionCache.removeFeedSubscriptions(subscription.getMetadata().getFeed());
    }
    if (!Strings.isNullOrEmpty(subscription.getTopic())) {
      subscriptionCache.removeTopicSubscriptions(subscription.getTopic());
    }

    return result;
  }
Example #4
 private Notification fetchNotification(final String notificationId) {
   return dbi.withHandle(
       new HandleCallback<Notification>() {
         @Override
         public Notification withHandle(final Handle handle) throws Exception {
           return handle
               .createQuery(
                   "   select"
                       + " record_id "
                       + ", id"
                       + ", class_name"
                       + ", account_id"
                       + ", notification_key"
                       + ", created_date"
                       + ", creating_owner"
                       + ", effective_date"
                       + ", queue_name"
                       + ", processing_owner"
                       + ", processing_available_date"
                       + ", processing_state"
                       + "    from notifications "
                       + " where "
                       + " id = '"
                       + notificationId
                       + "';")
               .map(new NotificationSqlMapper())
               .first();
         }
       });
 }
Example #5
  public Set<Subscription> loadByFeed(final String feed) {
    Set<Subscription> subscriptions = subscriptionCache.loadFeedSubscriptions(feed);
    if (subscriptions != null && !subscriptions.isEmpty()) {
      return subscriptions;
    }

    return dbi.withHandle(
        new HandleCallback<Set<Subscription>>() {
          @Override
          public Set<Subscription> withHandle(Handle handle) throws Exception {
            FeedEventMetaData metadata = new FeedEventMetaData(feed);
            Set<Subscription> subscriptions =
                ImmutableSet.copyOf(
                    handle
                        .createQuery(
                            "select id, metadata, channel, topic from subscriptions where metadata = :metadata")
                        .bind("metadata", mapper.writeValueAsString(metadata))
                        .map(new SubscriptionMapper())
                        .list());

            subscriptionCache.addFeedSubscriptions(feed, subscriptions);

            return subscriptions;
          }
        });
  }
Example #6
  public ShardIterator(
      long tableId,
      boolean merged,
      Optional<Map<Integer, String>> bucketToNode,
      TupleDomain<RaptorColumnHandle> effectivePredicate,
      IDBI dbi) {
    this.merged = merged;
    this.bucketToNode = bucketToNode.orElse(null);
    ShardPredicate predicate = ShardPredicate.create(effectivePredicate, bucketToNode.isPresent());

    String sql;
    if (bucketToNode.isPresent()) {
      sql = "SELECT shard_uuid, bucket_number FROM %s WHERE %s ORDER BY bucket_number";
    } else {
      sql = "SELECT shard_uuid, node_ids FROM %s WHERE %s";
    }
    sql = format(sql, shardIndexTable(tableId), predicate.getPredicate());

    dao = onDemandDao(dbi, ShardDao.class);
    fetchNodes();

    try {
      connection = dbi.open().getConnection();
      statement = connection.prepareStatement(sql);
      enableStreamingResults(statement);
      predicate.bind(statement);
      log.debug("Running query: %s", statement);
      resultSet = statement.executeQuery();
    } catch (SQLException e) {
      close();
      throw metadataError(e);
    }
  }
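The enableStreamingResults helper is referenced but not shown. For a MySQL-style driver, streaming is conventionally requested by setting the fetch size to Integer.MIN_VALUE (the same trick noted in the aggregation example further below); this sketch assumes that convention rather than reproducing the original helper.

  private static void enableStreamingResults(PreparedStatement statement) throws SQLException {
    // MySQL Connector/J streams rows one at a time instead of buffering the
    // whole result set when the fetch size is Integer.MIN_VALUE.
    statement.setFetchSize(Integer.MIN_VALUE);
  }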
Example #7
  public boolean isCompleted(final long timeout) {
    synchronized (this) {
      long waitTimeMs = timeout;
      do {
        try {
          final long before = System.currentTimeMillis();
          wait(100);
          if (completed) {
            // TODO PIERRE Kludge alert!
            // When we arrive here, we got notified by the current thread (Bus listener) that we
            // received all expected events. But other handlers might still be processing them.
            // Since there is only one bus thread, and the test thread waits for all events to be
            // processed, we're guaranteed that all are processed when the bus events table is
            // empty.
            // We also need to wait for in-processing notifications
            // (see https://github.com/killbill/killbill/issues/475).
            // This is really similar to TestResource#waitForNotificationToComplete.
            await()
                .atMost(timeout, TimeUnit.MILLISECONDS)
                .until(
                    new Callable<Boolean>() {
                      @Override
                      public Boolean call() throws Exception {
                        final long pending =
                            idbi.withHandle(new PendingBusOrNotificationCallback(clock));
                        log.debug("Events still in processing: {}", pending);
                        return pending == 0;
                      }
                    });
            return completed;
          }
          final long after = System.currentTimeMillis();
          waitTimeMs -= (after - before);
        } catch (final Exception ignore) {
          // Rerun one more time to provide details
          final long pending = idbi.withHandle(new PendingBusOrNotificationCallback(clock));
          log.error(
              "isCompleted : Received all events but found remaining unprocessed bus events/notifications =  {}",
              pending);
          return false;
        }
      } while (waitTimeMs > 0 && !completed);
    }

    if (!completed) {
      final Joiner joiner = Joiner.on(" ");
      log.error(
          "TestApiListener did not complete in "
              + timeout
              + " ms, remaining events are "
              + joiner.join(nextExpectedEvent));
    }
    return completed;
  }
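PendingBusOrNotificationCallback is used above but never defined. Per the comments, it counts bus events and notifications that are still awaiting processing. The sketch below is hypothetical: the table and column names (bus_events, notifications, processing_state, effective_date) are assumptions, not taken from the example.

  private static final class PendingBusOrNotificationCallback implements HandleCallback<Long> {
    private final Clock clock;

    PendingBusOrNotificationCallback(final Clock clock) {
      this.clock = clock;
    }

    @Override
    public Long withHandle(final Handle handle) throws Exception {
      // Assumed schema: anything left in bus_events, plus notifications that are
      // already effective but not yet processed, counts as pending work.
      return handle
          .createQuery(
              "select (select count(*) from bus_events)"
                  + " + (select count(*) from notifications"
                  + "    where processing_state = 'AVAILABLE' and effective_date <= :now)"
                  + " as pending")
          .bind("now", clock.getUTCNow().toDate())
          .map(LongMapper.FIRST)
          .first();
    }
  }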
Example #8
  @BeforeMethod(groups = "slow")
  public void setUp() throws Exception {
    final IDBI dbi = helper.getDBI();
    accountTagSqlDao = dbi.onDemand(BusinessAccountTagSqlDao.class);
    final BusinessInvoiceTagSqlDao invoiceTagSqlDao = dbi.onDemand(BusinessInvoiceTagSqlDao.class);
    final BusinessInvoicePaymentTagSqlDao invoicePaymentTagSqlDao =
        dbi.onDemand(BusinessInvoicePaymentTagSqlDao.class);
    subscriptionTransitionTagSqlDao = dbi.onDemand(BusinessSubscriptionTransitionTagSqlDao.class);
    eventBus = new InMemoryBus();
    final AccountDao accountDao = new AuditedAccountDao(dbi, eventBus);
    final AccountEmailDao accountEmailDao = new AuditedAccountEmailDao(dbi);
    final DefaultClock clock = new DefaultClock();
    callContextFactory = new DefaultCallContextFactory(clock);
    accountUserApi = new DefaultAccountUserApi(callContextFactory, accountDao, accountEmailDao);
    final CatalogService catalogService =
        new DefaultCatalogService(
            Mockito.mock(CatalogConfig.class), Mockito.mock(VersionedCatalogLoader.class));
    final AddonUtils addonUtils = new AddonUtils(catalogService);
    final DefaultNotificationQueueService notificationQueueService =
        new DefaultNotificationQueueService(dbi, clock);
    final EntitlementDao entitlementDao =
        new AuditedEntitlementDao(
            dbi, clock, addonUtils, notificationQueueService, eventBus, catalogService);
    final PlanAligner planAligner = new PlanAligner(catalogService);
    final DefaultSubscriptionApiService apiService =
        new DefaultSubscriptionApiService(clock, entitlementDao, catalogService, planAligner);
    final DefaultSubscriptionFactory subscriptionFactory =
        new DefaultSubscriptionFactory(apiService, clock, catalogService);
    entitlementUserApi =
        new DefaultEntitlementUserApi(
            clock, entitlementDao, catalogService, apiService, subscriptionFactory, addonUtils);
    tagRecorder =
        new BusinessTagRecorder(
            accountTagSqlDao,
            invoicePaymentTagSqlDao,
            invoiceTagSqlDao,
            subscriptionTransitionTagSqlDao,
            accountUserApi,
            entitlementUserApi);

    eventBus.start();
  }
Example #9
 @BeforeTest(groups = "slow")
 public void cleanupDb() {
   dbi.withHandle(
       new HandleCallback<Void>() {
         @Override
         public Void withHandle(final Handle handle) throws Exception {
           handle.execute("delete from notifications");
           handle.execute("delete from claimed_notifications");
           return null;
         }
       });
 }
Example #10
  @BeforeMethod
  public void setup() throws Exception {
    temporary = createTempDir();
    File directory = new File(temporary, "data");
    storageService = new FileStorageService(directory);
    storageService.start();

    File backupDirectory = new File(temporary, "backup");
    fileBackupStore = new FileBackupStore(backupDirectory);
    fileBackupStore.start();
    backupStore = Optional.of(fileBackupStore);

    IDBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime());
    dummyHandle = dbi.open();
    ShardManager shardManager = createShardManager(dbi);
    Duration discoveryInterval = new Duration(5, TimeUnit.MINUTES);
    recoveryManager =
        new ShardRecoveryManager(
            storageService, backupStore, nodeManager, shardManager, discoveryInterval, 10);

    shardRecorder = new InMemoryShardRecorder();
  }
Example #11
 /**
  * @param entitySqlDaoTransactionWrapper transaction to execute
  * @param <ReturnType> object type to return from the transaction
  * @return result from the transaction of type ReturnType
  */
 public <ReturnType> ReturnType execute(
     final EntitySqlDaoTransactionWrapper<ReturnType> entitySqlDaoTransactionWrapper) {
   final Handle handle = dbi.open();
   try {
     final EntitySqlDao<EntityModelDao<Entity>, Entity> entitySqlDao =
         handle.attach(InitialEntitySqlDao.class);
     return entitySqlDao.inTransaction(
         TransactionIsolationLevel.READ_COMMITTED,
         new JdbiTransaction<ReturnType, EntityModelDao<Entity>, Entity>(
             handle, entitySqlDaoTransactionWrapper));
   } finally {
     handle.close();
   }
 }
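A call site supplies the transaction body as an EntitySqlDaoTransactionWrapper. The sketch below is hypothetical usage only: the inTransaction signature, the factory's become method, and AccountSqlDao are assumptions for illustration, not taken from the example.

 // Hypothetical call site; the DAO type and callback signatures are assumed.
 final Long recordId =
     transactionalSqlDao.execute(
         new EntitySqlDaoTransactionWrapper<Long>() {
           @Override
           public Long inTransaction(final EntitySqlDaoWrapperFactory<EntitySqlDao> factory)
               throws Exception {
             // Every DAO obtained from the factory participates in the same
             // READ_COMMITTED transaction opened by execute().
             return factory.become(AccountSqlDao.class).getRecordId(accountId.toString());
           }
         });

Example #12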
 @Inject
 public TimelineAggregator(
     final IDBI dbi,
     final TimelineDao timelineDao,
     final TimelineCoder timelineCoder,
     final SampleCoder sampleCoder,
     final MeterConfig config) {
   this.dbi = dbi;
   this.timelineDao = timelineDao;
   this.timelineCoder = timelineCoder;
   this.sampleCoder = sampleCoder;
   this.config = config;
   this.aggregatorSqlDao = dbi.onDemand(TimelineAggregatorSqlDao.class);
   this.timelineChunkMapper = new TimelineChunkMapper();
 }
Example #13
 @Override
 public Subscription loadSubscriptionById(final Long id) {
   return dbi.withHandle(
       new HandleCallback<Subscription>() {
         @Override
         public Subscription withHandle(Handle handle) throws Exception {
           return handle
               .createQuery(
                   "select id, topic, metadata, channel from subscriptions where id = :id")
               .bind("id", id)
               .map(new SubscriptionMapper())
               .first();
         }
       });
 }
Example #14
  @Test
  public void testInsertShard() {
    long tableId = createTable("test");
    long shardId = dao.insertShard(UUID.randomUUID(), tableId, 13, 42, 84);

    String sql =
        "SELECT table_id, row_count, compressed_size, uncompressed_size "
            + "FROM shards WHERE shard_id = ?";
    List<Map<String, Object>> shards = dbi.withHandle(handle -> handle.select(sql, shardId));

    assertEquals(shards.size(), 1);
    Map<String, Object> shard = shards.get(0);

    assertEquals(shard.get("table_id"), tableId);
    assertEquals(shard.get("row_count"), 13L);
    assertEquals(shard.get("compressed_size"), 42L);
    assertEquals(shard.get("uncompressed_size"), 84L);
  }
Example #15
  @Override
  public Set<Subscription> loadByStartsWithTopic(final String topic) {
    return dbi.withHandle(
        new HandleCallback<Set<Subscription>>() {
          @Override
          public Set<Subscription> withHandle(Handle handle) throws Exception {
            Set<Subscription> subscriptions =
                ImmutableSet.copyOf(
                    handle
                        .createQuery(
                            "select id, metadata, channel, topic from subscriptions where topic like :topic")
                        .bind("topic", topic + "%")
                        .map(new SubscriptionMapper())
                        .list());

            return subscriptions;
          }
        });
  }
Example #16
  @Override
  public boolean run() {
    final List<DataSegment> segments = IndexGeneratorJob.getPublishedSegments(config);

    dbi.withHandle(
        new HandleCallback<Void>() {
          @Override
          public Void withHandle(Handle handle) throws Exception {
            final PreparedBatch batch =
                handle.prepareBatch(
                    String.format(
                        "INSERT INTO %s (id, dataSource, created_date, start, end, partitioned, version, used, payload) "
                            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                        config.getUpdaterJobSpec().getSegmentTable()));
            for (final DataSegment segment : segments) {

              batch.add(
                  new ImmutableMap.Builder<String, Object>()
                      .put("id", segment.getIdentifier())
                      .put("dataSource", segment.getDataSource())
                      .put("created_date", new DateTime().toString())
                      .put("start", segment.getInterval().getStart().toString())
                      .put("end", segment.getInterval().getEnd().toString())
                      .put("partitioned", segment.getShardSpec().getPartitionNum())
                      .put("version", segment.getVersion())
                      .put("used", true)
                      .put(
                          "payload",
                          HadoopDruidIndexerConfig.jsonMapper.writeValueAsString(segment))
                      .build());

              log.info("Published %s", segment.getIdentifier());
            }
            batch.execute();

            return null;
          }
        });

    return true;
  }
Example #17
 private boolean _lock(long duration, TimeUnit unit) {
   if (handle == null) {
     handle = dbi.open();
     // MySQL's get_lock() returns 1 on success, 0 on timeout, and NULL on error,
     // so map to Integer to avoid an unboxing NullPointerException.
     final Integer gotLock =
         handle
             .createQuery("select get_lock(:name, :time)")
             .bind("name", name)
             .bind("time", unit.toSeconds(duration))
             .map(IntegerMapper.FIRST)
             .first();
     if (gotLock != null && gotLock == 1) {
       return true;
     } else {
       handle.close();
       handle = null;
       return false;
     }
   } else {
     // we already have the lock!
     return true;
   }
 }
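The lock is acquired with MySQL's get_lock(); the natural counterpart releases it with release_lock(). Below is a minimal sketch of the matching unlock, assuming the same handle and name fields; it is not part of the original example.

 private boolean _unlock() {
   if (handle == null) {
     // we never held the lock
     return false;
   }
   // release_lock() returns 1 if this session released the lock, 0 if the lock
   // is held by another session, and NULL if the lock does not exist.
   final Integer released =
       handle
           .createQuery("select release_lock(:name)")
           .bind("name", name)
           .map(IntegerMapper.FIRST)
           .first();
   handle.close();
   handle = null;
   return released != null && released == 1;
 }

Example #18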
  @Override
  public boolean deleteSubscriptionById(final Long id) {
    return dbi.withHandle(
        new HandleCallback<Boolean>() {
          @Override
          public Boolean withHandle(Handle handle) throws Exception {
            Subscription subscription =
                handle
                    .createQuery(
                        "select id, metadata, channel, topic from subscriptions where id = :id")
                    .bind("id", id)
                    .map(new SubscriptionMapper())
                    .first();

            if (subscription == null) {
              return true;
            } else {
              if (subscription.getMetadata() != null
                  && !Strings.isNullOrEmpty(subscription.getMetadata().getFeed())) {
                subscriptionCache.removeFeedSubscriptions(subscription.getMetadata().getFeed());
              }
              if (!Strings.isNullOrEmpty(subscription.getTopic())) {
                subscriptionCache.removeTopicSubscriptions(subscription.getTopic());
              }

              handle
                  .createStatement("delete from feed_events where subscription_id = :id")
                  .bind("id", id)
                  .execute();

              return 1
                  == handle
                      .createStatement("delete from subscriptions where id = :id")
                      .bind("id", id)
                      .execute();
            }
          }
        });
  }
Example #19
  public void publishSegments(
      final String tableName, final List<DataSegment> segments, final ObjectMapper mapper) {
    dbi.withHandle(
        new HandleCallback<Void>() {
          @Override
          public Void withHandle(Handle handle) throws Exception {
            final PreparedBatch batch =
                handle.prepareBatch(
                    String.format(
                        "INSERT INTO %s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
                            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                        tableName));
            for (final DataSegment segment : segments) {

              batch.add(
                  new ImmutableMap.Builder<String, Object>()
                      .put("id", segment.getIdentifier())
                      .put("dataSource", segment.getDataSource())
                      .put("created_date", new DateTime().toString())
                      .put("start", segment.getInterval().getStart().toString())
                      .put("end", segment.getInterval().getEnd().toString())
                      .put("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec))
                      .put("version", segment.getVersion())
                      .put("used", true)
                      .put("payload", mapper.writeValueAsBytes(segment))
                      .build());

              log.info("Published %s", segment.getIdentifier());
            }
            batch.execute();

            return null;
          }
        });
  }
Example #20
  private void streamingAggregateLevel(final int aggregationLevel, final int chunksToAggregate) {
    final List<TimelineChunk> sourceTimelineCandidates = new ArrayList<TimelineChunk>();
    final TimelineChunkConsumer aggregationConsumer =
        new TimelineChunkConsumer() {

          int lastSourceId = 0;
          int lastMetricId = 0;

          @Override
          public void processTimelineChunk(final TimelineChunk candidate) {
            timelineChunksConsidered.incrementAndGet();
            final int sourceId = candidate.getSourceId();
            final int metricId = candidate.getMetricId();
            if (lastSourceId == 0) {
              lastSourceId = sourceId;
              lastMetricId = metricId;
            }
            if (lastSourceId != sourceId || lastMetricId != metricId) {
              aggregatesCreated.addAndGet(
                  aggregateTimelineCandidates(
                      sourceTimelineCandidates, aggregationLevel, chunksToAggregate));
              sourceTimelineCandidates.clear();
              lastSourceId = sourceId;
              lastMetricId = metricId;
            }
            sourceTimelineCandidates.add(candidate);
          }
        };
    final long startTime = System.currentTimeMillis();
    try {
      dbi.withHandle(
          new HandleCallback<Void>() {

            @Override
            public Void withHandle(final Handle handle) throws Exception {
              // MySQL needs special setup to make it stream the results. See:
              // http://javaquirks.blogspot.com/2007/12/mysql-streaming-result-set.html
              // http://stackoverflow.com/questions/2447324/streaming-large-result-sets-with-mysql
              final Query<Map<String, Object>> query =
                  handle
                      .createQuery("getStreamingAggregationCandidates")
                      .setFetchSize(Integer.MIN_VALUE)
                      .bind("aggregationLevel", aggregationLevel)
                      .bind("tenantRecordId", MeterInternalTenantContext.INTERNAL_TENANT_RECORD_ID);
              query.setStatementLocator(
                  new StringTemplate3StatementLocator(TimelineAggregatorSqlDao.class));
              ResultIterator<TimelineChunk> iterator = null;
              try {
                iterator = query.map(timelineChunkMapper).iterator();
                while (iterator.hasNext()) {
                  aggregationConsumer.processTimelineChunk(iterator.next());
                }
              } catch (Exception e) {
                log.error(
                    String.format("Exception during aggregation of level %d", aggregationLevel), e);
              } finally {
                if (iterator != null) {
                  iterator.close();
                }
              }
              return null;
            }
          });
      if (sourceTimelineCandidates.size() >= chunksToAggregate) {
        aggregatesCreated.addAndGet(
            aggregateTimelineCandidates(
                sourceTimelineCandidates, aggregationLevel, chunksToAggregate));
      }
      if (chunkIdsToInvalidateOrDelete.size() > 0) {
        performWrites();
      }
    } finally {
      msSpentAggregating.addAndGet(System.currentTimeMillis() - startTime);
    }
  }
Example #21
 //
 // This is only used in the pagination APIs when streaming results. We want to keep the
 // connection open, and there is also no need to send bus events or record notifications,
 // for which we would need to keep the Connection through the jDBI Handle.
 //
 public <M extends EntityModelDao<E>, E extends Entity, T extends EntitySqlDao<M, E>>
     T onDemandForStreamingResults(final Class<T> sqlObjectType) {
   return dbi.onDemand(sqlObjectType);
 }
Example #22
 @BeforeSuite(groups = "slow")
 public void setup() {
   dao = dbi.onDemand(NotificationSqlDao.class);
 }
Example #23
  @Override
  public Set<Subscription> loadByTopic(final String topicQuery) {
    final Set<String> topicSubQueries = decomposeTopicQuery(topicQuery);

    final Map<String, Optional<Subscription>> cachedResults =
        subscriptionCache.loadTopicSubscriptions(topicSubQueries);

    final Set<Subscription> result = new HashSet<Subscription>();

    // Iterate through the results from the cache, and remove any topics
    // that were found from the list of topics left to query, and add
    // the non-null subscriptions to the list of results
    for (String topic : cachedResults.keySet()) {
      if (cachedResults.get(topic).isPresent()) {
        result.add(cachedResults.get(topic).get());
        topicSubQueries.remove(topic);
      }
    }

    // all topics that are found in the cache will be removed, so if no
    // topic subqueries are left, we are done
    if (!topicSubQueries.isEmpty()) {

      Collection<Subscription> dbResults =
          dbi.withHandle(
              new HandleCallback<Collection<Subscription>>() {
                @Override
                public Collection<Subscription> withHandle(Handle handle) throws Exception {

                  final Set<Subscription> dbResult = new HashSet<Subscription>();

                  InClauseExpander in = new InClauseExpander(topicSubQueries);

                  Iterator<Subscription> subscriptionsIter =
                      handle
                          .createQuery(
                              "select id, metadata, channel, topic from subscriptions where topic in ("
                                  + in.getExpansion()
                                  + ")")
                          .bindNamedArgumentFinder(in)
                          .map(new SubscriptionMapper())
                          .iterator();

                  if (subscriptionsIter != null) {
                    while (subscriptionsIter.hasNext()) {
                      Subscription dbSubscription = subscriptionsIter.next();
                      dbResult.add(dbSubscription);

                      subscriptionCache.addTopicSubscriptions(
                          dbSubscription.getTopic(), Optional.of(dbSubscription));
                      topicSubQueries.remove(dbSubscription.getTopic());
                    }
                  }

                  return dbResult;
                }
              });

      // Add the database results to the results from the cache
      result.addAll(dbResults);

      // Add empty subscriptions to the cache
      if (!topicSubQueries.isEmpty()) {
        subscriptionCache.addEmptyTopicSubscriptions(topicSubQueries);
      }
    }

    return ImmutableSet.copyOf(result);
  }
Example #24
 @Override
 public BlockingStateDao get() {
   return dbi.onDemand(BlockingStateSqlDao.class);
 }
Example #25
 @Inject
 public AuditLogViaHistoryCacheLoader(final IDBI dbi, final NonEntityDao nonEntityDao) {
   super(dbi, nonEntityDao);
   this.auditSqlDao = dbi.onDemand(AuditSqlDao.class);
 }
Example #26
 private long createTable(String name) {
   return dbi.onDemand(MetadataDao.class).insertTable("test", name, false);
 }