Example 1
 /** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
 private SegmentCacheIndex getIndex(SegmentHeader header) {
   // First we check the indexes that already exist.
   // This is fast.
   for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
     final String factTableName = entry.getKey().getFactTable().getTableName();
     final ByteString schemaChecksum = entry.getKey().getSchema().getChecksum();
     if (!factTableName.equals(header.rolapStarFactTableName)) {
       continue;
     }
     if (!schemaChecksum.equals(header.schemaChecksum)) {
       continue;
     }
     return entry.getValue();
   }
   // The index doesn't exist. Let's create it.
   for (RolapSchema schema : RolapSchema.getRolapSchemas()) {
     if (!schema.getChecksum().equals(header.schemaChecksum)) {
       continue;
     }
     // We have a schema match.
     RolapStar star = schema.getStar(header.rolapStarFactTableName);
    if (star != null) {
        // Found it. Create and register an index for this star.
        indexes.put(star, new SegmentCacheIndexImpl(thread));
        return indexes.get(star);
      }
   }
   return null;
 }
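Worth noting about the lookup: the map is keyed by RolapStar, but the caller only holds a SegmentHeader, so Map.get cannot be used directly; the first loop has to scan the entries and match on the star's fact-table name and schema checksum. Below is a minimal, self-contained sketch of that scan-then-create shape, using Java 16+ records as hypothetical stand-ins for the Mondrian types; it illustrates the pattern, not the actual implementation.

import java.util.HashMap;
import java.util.Map;

class LazyIndexLookupSketch {
  // Hypothetical stand-ins for RolapStar and SegmentHeader.
  record Star(String factTable, String schemaChecksum) {}
  record Header(String factTable, String schemaChecksum) {}

  private final Map<Star, String> indexes = new HashMap<>();

  /** Scans existing entries by derived attributes, creating an entry on a miss. */
  String getIndex(Header header) {
    // Fast path: an index may already be registered for a matching star.
    for (Map.Entry<Star, String> entry : indexes.entrySet()) {
      Star star = entry.getKey();
      if (star.factTable().equals(header.factTable())
          && star.schemaChecksum().equals(header.schemaChecksum())) {
        return entry.getValue();
      }
    }
    // Slow path: resolve the star and register a fresh index for it.
    Star star = new Star(header.factTable(), header.schemaChecksum());
    String index = "index:" + star.factTable();
    indexes.put(star, index);
    return index;
  }
}

The scan is linear, which is acceptable here because the fast path runs over a handful of registered stars, not over segment headers.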
Example 2
 /**
 * Registers this Scenario with a Schema, creating a calculated member [Scenario].[{id}] for each
  * cube that has writeback enabled. (Currently a cube has writeback enabled iff it has a dimension
  * called "Scenario".)
  *
  * @param schema Schema
  */
 void register(RolapSchema schema) {
   // Add a value to the [Scenario] dimension of every cube that has
   // writeback enabled.
   for (RolapCube cube : schema.getCubeList()) {
     for (RolapHierarchy hierarchy : cube.getHierarchies()) {
       if (isScenario(hierarchy)) {
        member = cube.createCalculatedMember(hierarchy, String.valueOf(getId()), new ScenarioCalc(this));
         assert member != null;
       }
     }
   }
 }
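The isScenario(hierarchy) helper is not part of this excerpt. Given the javadoc rule above (writeback is enabled iff the cube has a dimension called "Scenario"), a plausible sketch is a simple name check; Mondrian's actual implementation may differ.

  // Hedged sketch: assumes the hierarchy's name identifies the writeback
  // dimension, per the javadoc above; the real check may differ.
  private static boolean isScenario(RolapHierarchy hierarchy) {
    return "Scenario".equals(hierarchy.getName());
  }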
Example 3
    public PeekResponse call() {
      final RolapStar.Measure measure = request.getMeasure();
      final RolapStar star = measure.getStar();
      final RolapSchema schema = star.getSchema();
      final AggregationKey key = new AggregationKey(request);
      final List<SegmentHeader> headers =
          indexRegistry
              .getIndex(star)
              .locate(
                  schema.getName(),
                  schema.getChecksum(),
                  measure.getCubeName(),
                  measure.getName(),
                  star.getFactTable().getAlias(),
                  request.getConstrainedColumnsBitKey(),
                  request.getMappedCellValues(),
                  AggregationKey.getCompoundPredicateStringList(
                      star, key.getCompoundPredicateList()));

      final Map<SegmentHeader, Future<SegmentBody>> headerMap =
          new HashMap<SegmentHeader, Future<SegmentBody>>();
      final Map<List, SegmentBuilder.SegmentConverter> converterMap =
          new HashMap<List, SegmentBuilder.SegmentConverter>();

      // Is there a pending segment? (A segment that has been created and
      // is loading via SQL.)
      for (final SegmentHeader header : headers) {
        final Future<SegmentBody> bodyFuture = indexRegistry.getIndex(star).getFuture(header);
        if (bodyFuture != null) {
          // Check if the DataSourceChangeListener wants us to clear
          // the current segment.
          if (star.getChangeListener() != null
              && star.getChangeListener().isAggregationChanged(key)) {
            // We can't satisfy this request, and we must clear the
            // data from our cache. We clear it from the index first,
            // then queue up a background job to remove the data from
            // all the caches.
            indexRegistry.getIndex(star).remove(header);
            Util.safeGet(
                cacheExecutor.submit(
                    new Runnable() {
                      public void run() {
                        try {
                          compositeCache.remove(header);
                        } catch (Throwable e) {
                          LOGGER.warn("remove header failed: " + header, e);
                        }
                      }
                    }),
                "SegmentCacheManager.peek");
          }
          converterMap.put(
              SegmentCacheIndexImpl.makeConverterKey(header), getConverter(star, header));
          headerMap.put(header, bodyFuture);
        }
      }

      return new PeekResponse(headerMap, converterMap);
    }
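The invalidation inside the loop follows an "index first, caches second" order: the header is removed from the in-memory index synchronously so no new lookups can match it, then the slower cache eviction is submitted to cacheExecutor, and Util.safeGet blocks until it finishes. A minimal sketch of that shape using only java.util.concurrent types follows; the names are illustrative, not Mondrian's.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class InvalidationSketch {
  private final Set<String> index = ConcurrentHashMap.newKeySet();
  private final ExecutorService cacheExecutor = Executors.newSingleThreadExecutor();

  void invalidate(String header) throws ExecutionException, InterruptedException {
    // 1. Remove from the index first so no new lookups can match the header.
    index.remove(header);
    // 2. Hand the slower cache cleanup to the executor; the original code
    //    then blocks on the submitted task via Util.safeGet, which the
    //    Future.get() below mirrors.
    Future<?> done = cacheExecutor.submit(() -> {
      try {
        evictFromCaches(header);
      } catch (Throwable t) {
        // Log and swallow, as the Runnable in call() does.
        System.err.println("remove header failed: " + header + ": " + t);
      }
    });
    done.get();
  }

  private void evictFromCaches(String header) {
    // Stand-in for compositeCache.remove(header).
  }
}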