/**
 * Makes a quick request to the aggregation manager to see whether the
 * cell value required by a particular cell request is in external cache.
 *
 * <p>'Quick' is relative. It is an asynchronous request (due to the
 * aggregation manager being an actor) and therefore somewhat slow. If
 * the segment is in cache, this saves batching up future requests and
 * re-executing the query. The win should be particularly noticeable for
 * queries running on a populated cache. Without this feature, every
 * query would require at least two iterations.
 *
 * <p>The request does not issue SQL to populate the segment. Nor does
 * it try to find existing segments for rollup. Those operations can
 * wait until the next phase.
 *
 * <p>The client is responsible for adding the segment to its private
 * cache.
 *
 * @param request Cell request
 * @return Segment with data, or null if not in cache
 */
public SegmentWithData peek(final CellRequest request) {
    final SegmentCacheManager.PeekResponse response =
        execute(new PeekCommand(request, Locus.peek()));

    // First, try segments whose bodies are already in the external
    // (composite) cache.
    for (SegmentHeader header : response.headerMap.keySet()) {
        final SegmentBody body = compositeCache.get(header);
        if (body != null) {
            final SegmentBuilder.SegmentConverter converter =
                response.converterMap.get(
                    SegmentCacheIndexImpl.makeConverterKey(header));
            if (converter != null) {
                return converter.convert(header, body);
            }
        }
    }

    // Otherwise, wait for a pending segment (one that is currently
    // loading via SQL) to complete.
    for (Map.Entry<SegmentHeader, Future<SegmentBody>> entry
        : response.headerMap.entrySet())
    {
        final Future<SegmentBody> bodyFuture = entry.getValue();
        if (bodyFuture != null) {
            final SegmentBody body =
                Util.safeGet(
                    bodyFuture,
                    "Waiting for segment to load");
            final SegmentHeader header = entry.getKey();
            final SegmentBuilder.SegmentConverter converter =
                response.converterMap.get(
                    SegmentCacheIndexImpl.makeConverterKey(header));
            if (converter != null) {
                return converter.convert(header, body);
            }
        }
    }
    return null;
}
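// A hedged, caller-side sketch of the contract described in the Javadoc
// above: check the external cache via peek(), retain a hit in the
// caller's private cache, otherwise defer to the normal batching /
// SQL-loading phase. The names "localSegments" and "batch" are assumed
// for illustration; only peek() itself comes from this class.
static SegmentWithData lookupOrDefer(
    SegmentCacheManager aggMgr,
    CellRequest request,
    List<SegmentWithData> localSegments,
    List<CellRequest> batch)
{
    final SegmentWithData segment = aggMgr.peek(request);
    if (segment != null) {
        // Hit: per the contract above, the client is responsible for
        // adding the segment to its own private cache.
        localSegments.add(segment);
    } else {
        // Miss: batch the request so SQL can populate it next phase.
        batch.add(request);
    }
    return segment;
}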
public void visit(final SegmentRemoveEvent event) {
    indexRegistry.getIndex(event.star).remove(event.header);

    event.monitor.sendEvent(
        new CellCacheSegmentDeleteEvent(
            event.timestamp,
            event.serverId,
            event.connectionId,
            event.statementId,
            event.executionId,
            event.header.getConstrainedColumns().size(),
            CellCacheEvent.Source.CACHE_CONTROL));

    // Remove the segment from external caches. Use an executor,
    // because it may take some time. Failures are logged inside the
    // task rather than propagated; we then wait for the task to
    // complete so the removal is visible before this handler returns.
    final Future<?> future = event.cacheMgr.cacheExecutor.submit(
        new Runnable() {
            public void run() {
                try {
                    // Note that the SegmentCache API doesn't require
                    // us to verify that the segment exists (by calling
                    // "contains") before we call "remove".
                    event.cacheMgr.compositeCache.remove(event.header);
                } catch (Throwable e) {
                    LOGGER.warn(
                        "remove header failed: " + event.header,
                        e);
                }
            }
        });
    Util.safeGet(future, "SegmentCacheManager.segmentremoved");
}
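// Illustration of why no "contains" check is needed above: a removal
// API can simply report whether the entry existed. This is a generic,
// self-contained sketch using a ConcurrentMap stand-in, not the actual
// SegmentCache SPI.
static <K, V> boolean removeIfPresent(
    java.util.concurrent.ConcurrentMap<K, V> cache,
    K key)
{
    // remove() on a missing key is harmless and returns null, so the
    // caller never needs a prior contains()/get() round trip.
    return cache.remove(key) != null;
}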
public FlushResult call() throws Exception {
    // For each measure and each star, ask the index
    // which headers intersect.
    final List<SegmentHeader> headers = new ArrayList<SegmentHeader>();
    final List<Member> measures = CacheControlImpl.findMeasures(region);
    final SegmentColumn[] flushRegion =
        CacheControlImpl.findAxisValues(region);
    final List<RolapStar> starList = CacheControlImpl.getStarList(region);

    for (Member member : measures) {
        if (!(member instanceof RolapStoredMeasure)) {
            continue;
        }
        final RolapStoredMeasure storedMeasure =
            (RolapStoredMeasure) member;
        final RolapStar star = storedMeasure.getCube().getStar();
        final SegmentCacheIndex index =
            cacheMgr.indexRegistry.getIndex(star);
        headers.addAll(
            index.intersectRegion(
                member.getDimension().getSchema().getName(),
                ((RolapSchema) member.getDimension().getSchema())
                    .getChecksum(),
                storedMeasure.getCube().getName(),
                storedMeasure.getName(),
                storedMeasure.getCube().getStar()
                    .getFactTable().getAlias(),
                flushRegion));
    }

    // If flushRegion is empty, we must clear all segments for the
    // region's measures.
    if (flushRegion.length == 0) {
        for (final SegmentHeader header : headers) {
            for (RolapStar star : starList) {
                cacheMgr.indexRegistry.getIndex(star).remove(header);
            }
            cacheControlImpl.trace(
                "discard segment - the flush region is empty, so all"
                + " segments for the region's measures are cleared:\n"
                + header.getDescription());
            // Remove the segment from external caches. Use an
            // executor, because it may take some time. Failures are
            // logged inside the task rather than propagated; we then
            // wait for the task to complete.
            final Future<?> task = cacheMgr.cacheExecutor.submit(
                new Runnable() {
                    public void run() {
                        try {
                            // Note that the SegmentCache API doesn't
                            // require us to verify that the segment
                            // exists (by calling "contains") before we
                            // call "remove".
                            cacheMgr.compositeCache.remove(header);
                        } catch (Throwable e) {
                            LOGGER.warn(
                                "remove header failed: " + header,
                                e);
                        }
                    }
                });
            Util.safeGet(task, "SegmentCacheManager.flush");
        }
        return new FlushResult(
            Collections.<Callable<Boolean>>emptyList());
    }

    // Now we know which headers intersect. For each of them,
    // we append an excluded region.
    //
    // TODO: Optimize the logic here. If a segment is mostly
    // empty, we should trash it completely.
    final List<Callable<Boolean>> callableList =
        new ArrayList<Callable<Boolean>>();
    for (final SegmentHeader header : headers) {
        if (!header.canConstrain(flushRegion)) {
            // We have to delete that segment altogether.
            cacheControlImpl.trace(
                "discard segment - it cannot be constrained and"
                + " maintain consistency:\n"
                + header.getDescription());
            for (RolapStar star : starList) {
                cacheMgr.indexRegistry.getIndex(star).remove(header);
            }
            continue;
        }
        final SegmentHeader newHeader = header.constrain(flushRegion);
        for (final SegmentCacheWorker worker
            : cacheMgr.segmentCacheWorkers)
        {
            callableList.add(
                new Callable<Boolean>() {
                    public Boolean call() throws Exception {
                        boolean existed;
                        if (worker.supportsRichIndex()) {
                            final SegmentBody sb = worker.get(header);
                            existed = worker.remove(header);
                            if (sb != null) {
                                worker.put(newHeader, sb);
                            }
                        } else {
                            // The cache doesn't support rich index. We
                            // have to clear the segment entirely.
                            existed = worker.remove(header);
                        }
                        return existed;
                    }
                });
        }
        for (RolapStar star : starList) {
            SegmentCacheIndex index =
                cacheMgr.indexRegistry.getIndex(star);
            index.remove(header);
            index.add(newHeader, false, null);
        }
    }

    // Done
    return new FlushResult(callableList);
}
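// A hedged sketch of how a caller might drain the callables returned in
// FlushResult; the executor and the error handling here are assumptions
// about the call site, not code from this class.
static void runFlushTasks(
    java.util.concurrent.ExecutorService executor,
    List<Callable<Boolean>> tasks)
    throws InterruptedException
{
    // invokeAll blocks until every cache worker has attempted its
    // remove (and, for rich-index caches, the re-put of the constrained
    // header). Each Boolean reports whether the segment existed.
    for (Future<Boolean> f : executor.invokeAll(tasks)) {
        try {
            f.get();
        } catch (java.util.concurrent.ExecutionException e) {
            // One failing cache worker should not abort the flush of
            // the remaining workers.
            LOGGER.warn("flush task failed", e);
        }
    }
}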
public PeekResponse call() {
    final RolapStar.Measure measure = request.getMeasure();
    final RolapStar star = measure.getStar();
    final RolapSchema schema = star.getSchema();
    final AggregationKey key = new AggregationKey(request);
    final List<SegmentHeader> headers =
        indexRegistry.getIndex(star)
            .locate(
                schema.getName(),
                schema.getChecksum(),
                measure.getCubeName(),
                measure.getName(),
                star.getFactTable().getAlias(),
                request.getConstrainedColumnsBitKey(),
                request.getMappedCellValues(),
                AggregationKey.getCompoundPredicateStringList(
                    star,
                    key.getCompoundPredicateList()));

    final Map<SegmentHeader, Future<SegmentBody>> headerMap =
        new HashMap<SegmentHeader, Future<SegmentBody>>();
    final Map<List, SegmentBuilder.SegmentConverter> converterMap =
        new HashMap<List, SegmentBuilder.SegmentConverter>();

    // Is there a pending segment? (A segment that has been created and
    // is loading via SQL.)
    for (final SegmentHeader header : headers) {
        final Future<SegmentBody> bodyFuture =
            indexRegistry.getIndex(star).getFuture(header);
        if (bodyFuture != null) {
            // Check if the DataSourceChangeListener wants us to clear
            // the current segment.
            if (star.getChangeListener() != null
                && star.getChangeListener().isAggregationChanged(key))
            {
                // We can't satisfy this request, and we must clear the
                // data from our cache. We clear it from the index
                // first, then queue up a job in the background to
                // remove the data from all the caches.
                indexRegistry.getIndex(star).remove(header);
                Util.safeGet(
                    cacheExecutor.submit(
                        new Runnable() {
                            public void run() {
                                try {
                                    compositeCache.remove(header);
                                } catch (Throwable e) {
                                    LOGGER.warn(
                                        "remove header failed: "
                                        + header,
                                        e);
                                }
                            }
                        }),
                    "SegmentCacheManager.peek");
                // The segment is stale; do not offer it to the caller.
                continue;
            }
            converterMap.put(
                SegmentCacheIndexImpl.makeConverterKey(header),
                getConverter(star, header));
            headerMap.put(header, bodyFuture);
        }
    }
    return new PeekResponse(headerMap, converterMap);
}