/** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
private SegmentCacheIndex getIndex(SegmentHeader header) {
    // First we check the indexes that already exist.
    // This is fast.
    for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
        final String factTableName =
            entry.getKey().getFactTable().getTableName();
        final ByteString schemaChecksum =
            entry.getKey().getSchema().getChecksum();
        if (!factTableName.equals(header.rolapStarFactTableName)) {
            continue;
        }
        if (!schemaChecksum.equals(header.schemaChecksum)) {
            continue;
        }
        return entry.getValue();
    }
    // The index doesn't exist. Let's create it.
    for (RolapSchema schema : RolapSchema.getRolapSchemas()) {
        if (!schema.getChecksum().equals(header.schemaChecksum)) {
            continue;
        }
        // We have a schema match.
        RolapStar star =
            schema.getStar(header.rolapStarFactTableName);
        if (star != null) {
            // Found it.
            indexes.put(star, new SegmentCacheIndexImpl(thread));
        }
        // If the schema has no star with that fact table, this lookup
        // returns null, same as falling through below.
        return indexes.get(star);
    }
    return null;
}
public Object visit(ConnectionStartEvent event) {
    final MutableConnectionInfo conn =
        new MutableConnectionInfo(event.stack);
    connectionMap.put(event.connectionId, conn);
    foo(conn, event);
    foo(server.aggConn, event);
    if (RolapUtil.MONITOR_LOGGER.isTraceEnabled()) {
        RolapUtil.MONITOR_LOGGER.trace(
            "Connection(" + event.connectionId
            + ") created. stack is:"
            + Util.nl
            + event.stack);
    }
    return null;
}
public Object visit(ExecutionPhaseEvent event) {
    final MutableExecutionInfo exec =
        executionMap.get(event.executionId);
    if (exec == null) {
        return missing(event);
    }
    foo(exec, event);
    foo(exec.stmt.aggExec, event);
    foo(exec.stmt.conn.aggExec, event);
    foo(server.aggExec, event);
    return null;
}
public Object visit(ExecutionEndEvent event) {
    final MutableExecutionInfo exec =
        executionMap.remove(event.executionId);
    if (exec == null) {
        return missing(event);
    }
    retiredExecutionMap.put(exec.executionId, exec);
    foo(exec, event);
    foo(exec.stmt.aggExec, event);
    foo(exec.stmt.conn.aggExec, event);
    foo(server.aggExec, event);
    // Since the execution info will no longer be in the table,
    // broadcast the final info to anyone who is interested.
    RolapUtil.MONITOR_LOGGER.debug(exec.fix());
    return null;
}
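Each of these visit methods performs the same rollup: the foo(...) overloads fold one event into the mutable info objects at every level of the execution -> statement -> connection -> server hierarchy, so per-level aggregates stay consistent with one another. A self-contained toy of that shape (class and field names here are hypothetical, not from the source):

// Toy illustration of the rollup the foo(...) calls perform above;
// AggInfo and executeEndCount are hypothetical stand-ins for the
// mutable info classes in the Mondrian monitor.
public class RollupDemo {
    static class AggInfo {
        long executeEndCount;
    }

    // One event updates the counters at every level it belongs to.
    static void rollup(AggInfo... levels) {
        for (AggInfo level : levels) {
            level.executeEndCount++;
        }
    }

    public static void main(String[] args) {
        AggInfo exec = new AggInfo();
        AggInfo stmtAgg = new AggInfo();
        AggInfo connAgg = new AggInfo();
        AggInfo serverAgg = new AggInfo();
        // Analogous to foo(exec, event); foo(exec.stmt.aggExec, event);
        // foo(exec.stmt.conn.aggExec, event); foo(server.aggExec, event);
        rollup(exec, stmtAgg, connAgg, serverAgg);
        System.out.println(serverAgg.executeEndCount); // prints 1
    }
}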
/**
 * Retrieves the response from the queue matching the given key,
 * blocking until it is received.
 *
 * @param k Key
 * @return Response
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized V take(K k) throws InterruptedException {
    final V v = taken.remove(k);
    if (v != null) {
        return v;
    }
    // Take the laundry out of the machine. If it's ours, leave with it.
    // If it's someone else's, fold it neatly and put it on the pile.
    for (;;) {
        final Pair<K, V> pair = queue.take();
        if (pair.left.equals(k)) {
            return pair.right;
        } else {
            taken.put(pair.left, pair.right);
        }
    }
}
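To see the take-or-stash protocol in isolation, here is a self-contained toy version of the same shape (a shared queue of key/value pairs plus a "taken" stash). The class name and demo keys are hypothetical, and Pair is replaced by java.util's AbstractMap.SimpleImmutableEntry:

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Toy version of the blocking map above; names are hypothetical.
public class Exchange<K, V> {
    private final BlockingQueue<SimpleImmutableEntry<K, V>> queue =
        new LinkedBlockingQueue<SimpleImmutableEntry<K, V>>();
    private final Map<K, V> taken = new HashMap<K, V>();

    public void put(K k, V v) {
        queue.add(new SimpleImmutableEntry<K, V>(k, v));
    }

    public synchronized V take(K k) throws InterruptedException {
        V v = taken.remove(k);
        if (v != null) {
            return v;
        }
        for (;;) {
            SimpleImmutableEntry<K, V> pair = queue.take();
            if (pair.getKey().equals(k)) {
                return pair.getValue();
            }
            // Someone else's laundry: stash it for a later take().
            taken.put(pair.getKey(), pair.getValue());
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Exchange<Integer, String> exchange = new Exchange<Integer, String>();
        // Responses arrive out of order: key 2 first, then key 1.
        exchange.put(2, "second");
        exchange.put(1, "first");
        // take(1) drains the queue, stashing the response for key 2.
        System.out.println(exchange.take(1)); // prints "first"
        System.out.println(exchange.take(2)); // prints "second" (from stash)
    }
}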
public Object visit(StatementStartEvent event) {
    final MutableConnectionInfo conn =
        connectionMap.get(event.connectionId);
    if (conn == null) {
        return missing(event);
    }
    final MutableStatementInfo stmt =
        new MutableStatementInfo(conn, event.statementId, event.stack);
    statementMap.put(event.statementId, stmt);
    foo(stmt, event);
    foo(conn.aggStmt, event);
    foo(server.aggStmt, event);
    if (RolapUtil.MONITOR_LOGGER.isTraceEnabled()) {
        RolapUtil.MONITOR_LOGGER.trace(
            "Statement(" + event.statementId
            + ") created. stack is:"
            + Util.nl
            + event.stack);
    }
    return null;
}
public Object visit(ExecutionStartEvent event) {
    final MutableStatementInfo stmt =
        statementMap.get(event.statementId);
    if (stmt == null) {
        return missing(event);
    }
    final MutableExecutionInfo exec =
        new MutableExecutionInfo(stmt, event.executionId, event.stack);
    executionMap.put(event.executionId, exec);
    foo(exec, event);
    foo(stmt.aggExec, event);
    foo(stmt.conn.aggExec, event);
    foo(server.aggExec, event);
    if (RolapUtil.MONITOR_LOGGER.isTraceEnabled()) {
        RolapUtil.MONITOR_LOGGER.trace(
            "Execution(" + event.executionId
            + ") created. stack is:"
            + Util.nl
            + event.stack);
    }
    return null;
}
public Object visit(SqlStatementStartEvent event) {
    final MutableStatementInfo stmt =
        statementMap.get(event.getStatementId());
    if (stmt == null) {
        return missing(event);
    }
    final MutableSqlStatementInfo sql =
        new MutableSqlStatementInfo(
            stmt, event.sqlStatementId, event.sql, event.stack);
    sqlStatementMap.put(event.sqlStatementId, sql);
    foo(sql, event);
    foo(sql.stmt.aggSql, event);
    foo(server.aggSql, event);
    if (RolapUtil.MONITOR_LOGGER.isTraceEnabled()) {
        RolapUtil.MONITOR_LOGGER.trace(
            "SqlStatement(" + event.sqlStatementId
            + ") created. stack is:"
            + Util.nl
            + event.stack);
    }
    return null;
}
/** Returns the {@link SegmentCacheIndex} for a given {@link RolapStar},
 * creating it on first use. */
public SegmentCacheIndex getIndex(RolapStar star) {
    if (!indexes.containsKey(star)) {
        indexes.put(star, new SegmentCacheIndexImpl(thread));
    }
    return indexes.get(star);
}
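The containsKey/put pair above is a check-then-act, which is safe only if calls are confined to a single thread, as the surrounding manager's design appears to assume. If the registry ever had to be shared across threads, the standard idiom would use a concurrent map. A sketch of that alternative, not how the source is written; the wrapper class is hypothetical and assumes the Mondrian types (RolapStar, SegmentCacheIndex, SegmentCacheIndexImpl) are imported:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Sketch only: lazy index creation that tolerates concurrent callers.
class ConcurrentIndexRegistry {
    private final ConcurrentMap<RolapStar, SegmentCacheIndex> indexes =
        new ConcurrentHashMap<RolapStar, SegmentCacheIndex>();
    private final Thread thread; // as in the enclosing manager

    ConcurrentIndexRegistry(Thread thread) {
        this.thread = thread;
    }

    public SegmentCacheIndex getIndex(RolapStar star) {
        SegmentCacheIndex index = indexes.get(star);
        if (index == null) {
            SegmentCacheIndex candidate = new SegmentCacheIndexImpl(thread);
            // putIfAbsent keeps whichever index got registered first.
            SegmentCacheIndex prior = indexes.putIfAbsent(star, candidate);
            index = (prior == null) ? candidate : prior;
        }
        return index;
    }
}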
public PeekResponse call() {
    final RolapStar.Measure measure = request.getMeasure();
    final RolapStar star = measure.getStar();
    final RolapSchema schema = star.getSchema();
    final AggregationKey key = new AggregationKey(request);
    final List<SegmentHeader> headers =
        indexRegistry.getIndex(star)
            .locate(
                schema.getName(),
                schema.getChecksum(),
                measure.getCubeName(),
                measure.getName(),
                star.getFactTable().getAlias(),
                request.getConstrainedColumnsBitKey(),
                request.getMappedCellValues(),
                AggregationKey.getCompoundPredicateStringList(
                    star, key.getCompoundPredicateList()));
    final Map<SegmentHeader, Future<SegmentBody>> headerMap =
        new HashMap<SegmentHeader, Future<SegmentBody>>();
    final Map<List, SegmentBuilder.SegmentConverter> converterMap =
        new HashMap<List, SegmentBuilder.SegmentConverter>();

    // Is there a pending segment? (A segment that has been created and
    // is loading via SQL.)
    for (final SegmentHeader header : headers) {
        final Future<SegmentBody> bodyFuture =
            indexRegistry.getIndex(star).getFuture(header);
        if (bodyFuture != null) {
            // Check whether the DataSourceChangeListener wants us to
            // clear the current segment.
            if (star.getChangeListener() != null
                && star.getChangeListener().isAggregationChanged(key))
            {
                // We can't satisfy this request, and we must clear the
                // data from our cache. We clear it from the index
                // first, then queue up a job in the background
                // to remove the data from all the caches.
                indexRegistry.getIndex(star).remove(header);
                Util.safeGet(
                    cacheExecutor.submit(
                        new Runnable() {
                            public void run() {
                                try {
                                    compositeCache.remove(header);
                                } catch (Throwable e) {
                                    LOGGER.warn(
                                        "remove header failed: " + header,
                                        e);
                                }
                            }
                        }),
                    "SegmentCacheManager.peek");
            }
            converterMap.put(
                SegmentCacheIndexImpl.makeConverterKey(header),
                getConverter(star, header));
            headerMap.put(header, bodyFuture);
        }
    }
    return new PeekResponse(headerMap, converterMap);
}
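One step above is worth unpacking: Util.safeGet(cacheExecutor.submit(...), "SegmentCacheManager.peek") blocks the caller until the background cache removal completes, essentially Future.get() plus exception wrapping. Stripped of the Mondrian helper, the pattern is plain java.util.concurrent; the class name and task body below are placeholders, not the Mondrian implementation:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Sketch of the submit-then-block pattern used in call() above.
public class SubmitAndWait {
    public static void main(String[] args) throws Exception {
        ExecutorService cacheExecutor = Executors.newSingleThreadExecutor();
        Future<?> removal = cacheExecutor.submit(
            new Runnable() {
                public void run() {
                    // Stand-in for compositeCache.remove(header).
                    System.out.println("removing segment from caches...");
                }
            });
        // get() blocks until the Runnable has completed, as safeGet does.
        removal.get();
        cacheExecutor.shutdown();
    }
}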