@Override
public Long insert(final Subscription subscription) {
    Long result = dbi.withHandle(new HandleCallback<Long>() {
        @Override
        public Long withHandle(Handle handle) throws Exception {
            return handle
                    .createStatement("insert into subscriptions (topic, metadata, channel) values (:topic, :metadata, :channel)")
                    .bind("topic", subscription.getTopic())
                    .bind("metadata", mapper.writeValueAsString(subscription.getMetadata()))
                    .bind("channel", subscription.getChannel())
                    .executeAndReturnGeneratedKeys(LongMapper.FIRST)
                    .first();
        }
    });
    // Invalidate any cached feed/topic subscription lists affected by this insert.
    if (!Strings.isNullOrEmpty(subscription.getMetadata().getFeed())) {
        subscriptionCache.removeFeedSubscriptions(subscription.getMetadata().getFeed());
    }
    if (!Strings.isNullOrEmpty(subscription.getTopic())) {
        subscriptionCache.removeTopicSubscriptions(subscription.getTopic());
    }
    return result;
}
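The snippets in this collection all follow the same JDBI 2 withHandle pattern: obtain a handle, run bound statements, return a result, and let JDBI close the handle. The following is a minimal, self-contained sketch of that round trip, assuming a hypothetical in-memory H2 database and a stripped-down subscriptions table with a single topic column; the class name and JDBC URL are illustrative only.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.util.LongMapper;

public class WithHandleSketch {

    public static void main(final String[] args) {
        // Assumption: an in-memory H2 database; any JDBC URL or DataSource would do.
        final DBI dbi = new DBI("jdbc:h2:mem:example;DB_CLOSE_DELAY=-1");

        final Long count = dbi.withHandle(new HandleCallback<Long>() {
            @Override
            public Long withHandle(final Handle handle) throws Exception {
                // Hypothetical, reduced schema for illustration only.
                handle.execute("create table subscriptions (id bigint auto_increment primary key, topic varchar(255))");
                handle.createStatement("insert into subscriptions (topic) values (:topic)")
                      .bind("topic", "user.created")
                      .execute();
                return handle.createQuery("select count(*) from subscriptions where topic = :topic")
                             .bind("topic", "user.created")
                             .map(LongMapper.FIRST)
                             .first();
            }
        });
        System.out.println("matching rows: " + count);
    }
}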
private Notification fetchNotification(final String notificationId) {
    return dbi.withHandle(new HandleCallback<Notification>() {
        @Override
        public Notification withHandle(final Handle handle) throws Exception {
            // Bind the id as a parameter instead of concatenating it into the SQL string,
            // which was vulnerable to SQL injection.
            return handle
                    .createQuery(" select record_id"
                                 + ", id"
                                 + ", class_name"
                                 + ", account_id"
                                 + ", notification_key"
                                 + ", created_date"
                                 + ", creating_owner"
                                 + ", effective_date"
                                 + ", queue_name"
                                 + ", processing_owner"
                                 + ", processing_available_date"
                                 + ", processing_state"
                                 + " from notifications"
                                 + " where id = :id")
                    .bind("id", notificationId)
                    .map(new NotificationSqlMapper())
                    .first();
        }
    });
}
public Set<Subscription> loadByFeed(final String feed) {
    Set<Subscription> subscriptions = subscriptionCache.loadFeedSubscriptions(feed);
    if (subscriptions != null && !subscriptions.isEmpty()) {
        return subscriptions;
    }
    return dbi.withHandle(new HandleCallback<Set<Subscription>>() {
        @Override
        public Set<Subscription> withHandle(Handle handle) throws Exception {
            FeedEventMetaData metadata = new FeedEventMetaData(feed);
            Set<Subscription> subscriptions = ImmutableSet.copyOf(
                    handle.createQuery("select id, metadata, channel, topic from subscriptions where metadata = :metadata")
                          .bind("metadata", mapper.writeValueAsString(metadata))
                          .map(new SubscriptionMapper())
                          .list());
            subscriptionCache.addFeedSubscriptions(feed, subscriptions);
            return subscriptions;
        }
    });
}
public boolean isCompleted(final long timeout) {
    synchronized (this) {
        long waitTimeMs = timeout;
        do {
            try {
                final long before = System.currentTimeMillis();
                wait(100);
                if (completed) {
                    // TODO PIERRE Kludge alert!
                    // When we arrive here, we got notified by the current thread (Bus listener) that we received
                    // all expected events. But other handlers might still be processing them.
                    // Since there is only one bus thread, and the test thread waits for all events to be processed,
                    // we're guaranteed that all are processed when the bus events table is empty.
                    // We also need to wait for in-processing notifications (see https://github.com/killbill/killbill/issues/475).
                    // This is really similar to TestResource#waitForNotificationToComplete.
                    await().atMost(timeout, TimeUnit.MILLISECONDS)
                           .until(new Callable<Boolean>() {
                               @Override
                               public Boolean call() throws Exception {
                                   final long pending = idbi.withHandle(new PendingBusOrNotificationCallback(clock));
                                   log.debug("Events still in processing: {}", pending);
                                   return pending == 0;
                               }
                           });
                    return completed;
                }
                final long after = System.currentTimeMillis();
                waitTimeMs -= (after - before);
            } catch (final Exception ignore) {
                // Rerun one more time to provide details
                final long pending = idbi.withHandle(new PendingBusOrNotificationCallback(clock));
                log.error("isCompleted : Received all events but found remaining unprocessed bus events/notifications = {}", pending);
                return false;
            }
        } while (waitTimeMs > 0 && !completed);
    }
    if (!completed) {
        final Joiner joiner = Joiner.on(" ");
        log.error("TestApiListener did not complete in " + timeout + " ms, remaining events are " + joiner.join(nextExpectedEvent));
    }
    return completed;
}
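The polling step above reduces to a small pattern: Awaitility re-evaluates a condition that counts the rows still pending until the count reaches zero or the timeout expires. A sketch of just that step follows; the bus_events table and query are assumptions for illustration, and the static import is for recent Awaitility versions (older releases used the com.jayway.awaitility package).

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.util.LongMapper;

import static org.awaitility.Awaitility.await;

public class WaitForQueueSketch {

    // Blocks until the (hypothetical) bus_events table is empty or the timeout expires.
    public static void waitUntilDrained(final DBI dbi, final long timeoutMs) {
        await().atMost(timeoutMs, TimeUnit.MILLISECONDS).until(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                final Long pending = dbi.withHandle(new HandleCallback<Long>() {
                    @Override
                    public Long withHandle(final Handle handle) throws Exception {
                        return handle.createQuery("select count(*) from bus_events")
                                     .map(LongMapper.FIRST)
                                     .first();
                    }
                });
                return pending == 0L;
            }
        });
    }
}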
@BeforeTest(groups = "slow")
public void cleanupDb() {
    dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(final Handle handle) throws Exception {
            handle.execute("delete from notifications");
            handle.execute("delete from claimed_notifications");
            return null;
        }
    });
}
@Override
public Subscription loadSubscriptionById(final Long id) {
    return dbi.withHandle(new HandleCallback<Subscription>() {
        @Override
        public Subscription withHandle(Handle handle) throws Exception {
            return handle.createQuery("select id, topic, metadata, channel from subscriptions where id = :id")
                         .bind("id", id)
                         .map(new SubscriptionMapper())
                         .first();
        }
    });
}
@Test
public void testInsertShard() {
    long tableId = createTable("test");
    long shardId = dao.insertShard(UUID.randomUUID(), tableId, 13, 42, 84);

    String sql = "SELECT table_id, row_count, compressed_size, uncompressed_size "
               + "FROM shards WHERE shard_id = ?";
    List<Map<String, Object>> shards = dbi.withHandle(handle -> handle.select(sql, shardId));

    assertEquals(shards.size(), 1);
    Map<String, Object> shard = shards.get(0);
    assertEquals(shard.get("table_id"), tableId);
    assertEquals(shard.get("row_count"), 13L);
    assertEquals(shard.get("compressed_size"), 42L);
    assertEquals(shard.get("uncompressed_size"), 84L);
}
@Override
public Set<Subscription> loadByStartsWithTopic(final String topic) {
    return dbi.withHandle(new HandleCallback<Set<Subscription>>() {
        @Override
        public Set<Subscription> withHandle(Handle handle) throws Exception {
            Set<Subscription> subscriptions = ImmutableSet.copyOf(
                    handle.createQuery("select id, metadata, channel, topic from subscriptions where topic like :topic")
                          .bind("topic", topic + "%")
                          .map(new SubscriptionMapper())
                          .list());
            return subscriptions;
        }
    });
}
@Override
public boolean run() {
    final List<DataSegment> segments = IndexGeneratorJob.getPublishedSegments(config);

    dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(Handle handle) throws Exception {
            final PreparedBatch batch = handle.prepareBatch(
                    String.format(
                            "INSERT INTO %s (id, dataSource, created_date, start, end, partitioned, version, used, payload) "
                            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                            config.getUpdaterJobSpec().getSegmentTable()));

            for (final DataSegment segment : segments) {
                batch.add(new ImmutableMap.Builder<String, Object>()
                        .put("id", segment.getIdentifier())
                        .put("dataSource", segment.getDataSource())
                        .put("created_date", new DateTime().toString())
                        .put("start", segment.getInterval().getStart().toString())
                        .put("end", segment.getInterval().getEnd().toString())
                        .put("partitioned", segment.getShardSpec().getPartitionNum())
                        .put("version", segment.getVersion())
                        .put("used", true)
                        .put("payload", HadoopDruidIndexerConfig.jsonMapper.writeValueAsString(segment))
                        .build());
                log.info("Published %s", segment.getIdentifier());
            }
            batch.execute();
            return null;
        }
    });
    return true;
}
@Override
public boolean deleteSubscriptionById(final Long id) {
    return dbi.withHandle(new HandleCallback<Boolean>() {
        @Override
        public Boolean withHandle(Handle handle) throws Exception {
            Subscription subscription = handle
                    .createQuery("select id, metadata, channel, topic from subscriptions where id = :id")
                    .bind("id", id)
                    .map(new SubscriptionMapper())
                    .first();
            if (subscription == null) {
                // Nothing to delete.
                return true;
            }
            // Invalidate cached entries before removing the subscription and its feed events.
            if (subscription.getMetadata() != null
                    && !Strings.isNullOrEmpty(subscription.getMetadata().getFeed())) {
                subscriptionCache.removeFeedSubscriptions(subscription.getMetadata().getFeed());
            }
            if (!Strings.isNullOrEmpty(subscription.getTopic())) {
                subscriptionCache.removeTopicSubscriptions(subscription.getTopic());
            }
            handle.createStatement("delete from feed_events where subscription_id = :id")
                  .bind("id", id)
                  .execute();
            return 1 == handle.createStatement("delete from subscriptions where id = :id")
                              .bind("id", id)
                              .execute();
        }
    });
}
public void publishSegments(final String tableName, final List<DataSegment> segments, final ObjectMapper mapper) {
    dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(Handle handle) throws Exception {
            final PreparedBatch batch = handle.prepareBatch(
                    String.format(
                            "INSERT INTO %s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
                            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                            tableName));

            for (final DataSegment segment : segments) {
                batch.add(new ImmutableMap.Builder<String, Object>()
                        .put("id", segment.getIdentifier())
                        .put("dataSource", segment.getDataSource())
                        .put("created_date", new DateTime().toString())
                        .put("start", segment.getInterval().getStart().toString())
                        .put("end", segment.getInterval().getEnd().toString())
                        .put("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec))
                        .put("version", segment.getVersion())
                        .put("used", true)
                        .put("payload", mapper.writeValueAsBytes(segment))
                        .build());
                log.info("Published %s", segment.getIdentifier());
            }
            batch.execute();
            return null;
        }
    });
}
private void streamingAggregateLevel(final int aggregationLevel, final int chunksToAggregate) {
    final List<TimelineChunk> sourceTimelineCandidates = new ArrayList<TimelineChunk>();
    final TimelineChunkConsumer aggregationConsumer = new TimelineChunkConsumer() {

        int lastSourceId = 0;
        int lastMetricId = 0;

        @Override
        public void processTimelineChunk(final TimelineChunk candidate) {
            timelineChunksConsidered.incrementAndGet();
            final int sourceId = candidate.getSourceId();
            final int metricId = candidate.getMetricId();
            if (lastSourceId == 0) {
                lastSourceId = sourceId;
                lastMetricId = metricId;
            }
            if (lastSourceId != sourceId || lastMetricId != metricId) {
                aggregatesCreated.addAndGet(
                        aggregateTimelineCandidates(sourceTimelineCandidates, aggregationLevel, chunksToAggregate));
                sourceTimelineCandidates.clear();
                lastSourceId = sourceId;
                lastMetricId = metricId;
            }
            sourceTimelineCandidates.add(candidate);
        }
    };

    final long startTime = System.currentTimeMillis();
    try {
        dbi.withHandle(new HandleCallback<Void>() {
            @Override
            public Void withHandle(final Handle handle) throws Exception {
                // MySQL needs special setup to make it stream the results. See:
                // http://javaquirks.blogspot.com/2007/12/mysql-streaming-result-set.html
                // http://stackoverflow.com/questions/2447324/streaming-large-result-sets-with-mysql
                final Query<Map<String, Object>> query = handle
                        .createQuery("getStreamingAggregationCandidates")
                        .setFetchSize(Integer.MIN_VALUE)
                        .bind("aggregationLevel", aggregationLevel)
                        .bind("tenantRecordId", MeterInternalTenantContext.INTERNAL_TENANT_RECORD_ID);
                query.setStatementLocator(new StringTemplate3StatementLocator(TimelineAggregatorSqlDao.class));

                ResultIterator<TimelineChunk> iterator = null;
                try {
                    iterator = query.map(timelineChunkMapper).iterator();
                    while (iterator.hasNext()) {
                        aggregationConsumer.processTimelineChunk(iterator.next());
                    }
                } catch (Exception e) {
                    log.error(String.format("Exception during aggregation of level %d", aggregationLevel), e);
                } finally {
                    if (iterator != null) {
                        iterator.close();
                    }
                }
                return null;
            }
        });

        if (sourceTimelineCandidates.size() >= chunksToAggregate) {
            aggregatesCreated.addAndGet(
                    aggregateTimelineCandidates(sourceTimelineCandidates, aggregationLevel, chunksToAggregate));
        }
        if (chunkIdsToInvalidateOrDelete.size() > 0) {
            performWrites();
        }
    } finally {
        msSpentAggregating.addAndGet(System.currentTimeMillis() - startTime);
    }
}
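The essential part of the streaming read above, reduced to a sketch: set the fetch size to Integer.MIN_VALUE so the MySQL driver streams rows instead of buffering the whole result set, iterate, and always close the ResultIterator. The events table, its columns, and the class name are hypothetical.

import java.util.Map;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.ResultIterator;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class StreamingReadSketch {

    // Counts rows of a (hypothetical) events table without materializing the result set in memory.
    public static long countStreaming(final DBI dbi) {
        return dbi.withHandle(new HandleCallback<Long>() {
            @Override
            public Long withHandle(final Handle handle) throws Exception {
                final ResultIterator<Map<String, Object>> iterator = handle
                        .createQuery("select id, payload from events")
                        // With the MySQL driver, Integer.MIN_VALUE requests row-by-row streaming.
                        .setFetchSize(Integer.MIN_VALUE)
                        .iterator();
                long rows = 0;
                try {
                    while (iterator.hasNext()) {
                        iterator.next();
                        rows++;
                    }
                } finally {
                    // Always release the underlying statement and result set.
                    iterator.close();
                }
                return rows;
            }
        });
    }
}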
@Override
public Set<Subscription> loadByTopic(final String topicQuery) {
    final Set<String> topicSubQueries = decomposeTopicQuery(topicQuery);
    final Map<String, Optional<Subscription>> cachedResults =
            subscriptionCache.loadTopicSubscriptions(topicSubQueries);
    final Set<Subscription> result = new HashSet<Subscription>();

    // Iterate through the results from the cache, and remove any topics
    // that were found from the list of topics left to query, and add
    // the non-null subscriptions to the list of results
    for (String topic : cachedResults.keySet()) {
        if (cachedResults.get(topic).isPresent()) {
            result.add(cachedResults.get(topic).get());
            topicSubQueries.remove(topic);
        }
    }

    // all topics that are found in the cache will be removed, so if no
    // topic subqueries are left, we are done
    if (!topicSubQueries.isEmpty()) {
        Collection<Subscription> dbResults = dbi.withHandle(new HandleCallback<Collection<Subscription>>() {
            @Override
            public Collection<Subscription> withHandle(Handle handle) throws Exception {
                final Set<Subscription> dbResult = new HashSet<Subscription>();
                InClauseExpander in = new InClauseExpander(topicSubQueries);
                Iterator<Subscription> subscriptionsIter = handle
                        .createQuery("select id, metadata, channel, topic from subscriptions where topic in (" + in.getExpansion() + ")")
                        .bindNamedArgumentFinder(in)
                        .map(new SubscriptionMapper())
                        .iterator();
                if (subscriptionsIter != null) {
                    while (subscriptionsIter.hasNext()) {
                        Subscription dbSubscription = subscriptionsIter.next();
                        dbResult.add(dbSubscription);
                        subscriptionCache.addTopicSubscriptions(dbSubscription.getTopic(), Optional.of(dbSubscription));
                        topicSubQueries.remove(dbSubscription.getTopic());
                    }
                }
                return dbResult;
            }
        });

        // Add the database results to the results from the cache
        result.addAll(dbResults);

        // Add empty subscriptions to the cache
        if (!topicSubQueries.isEmpty()) {
            subscriptionCache.addEmptyTopicSubscriptions(topicSubQueries);
        }
    }
    return ImmutableSet.copyOf(result);
}
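InClauseExpander above is a project-specific helper for binding an IN clause. Without such a helper, a plain-JDBI alternative is to build the right number of ? placeholders and bind each value by position. The sketch below assumes the same subscriptions table; the class and method names are illustrative only.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.Query;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class InClauseSketch {

    // Selects subscriptions whose topic is in the given set, binding each value positionally.
    public static List<Map<String, Object>> findByTopics(final DBI dbi, final Set<String> topics) {
        return dbi.withHandle(new HandleCallback<List<Map<String, Object>>>() {
            @Override
            public List<Map<String, Object>> withHandle(final Handle handle) throws Exception {
                if (topics.isEmpty()) {
                    return new ArrayList<Map<String, Object>>();
                }
                // Build "?, ?, ..., ?" with one placeholder per topic.
                final StringBuilder placeholders = new StringBuilder();
                for (int i = 0; i < topics.size(); i++) {
                    placeholders.append(i == 0 ? "?" : ", ?");
                }
                final Query<Map<String, Object>> query = handle.createQuery(
                        "select id, metadata, channel, topic from subscriptions where topic in (" + placeholders + ")");
                int position = 0;
                for (final String topic : topics) {
                    query.bind(position++, topic);
                }
                return query.list();
            }
        });
    }
}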