Example No. 1
 /**
  * Removes a segment from segment index.
  *
  * <p>The call is asynchronous; it returns immediately.
  *
  * <p>Does not remove the segment from the external cache.
  *
  * @param star Star
  * @param header segment header
  */
 public void remove(RolapStar star, SegmentHeader header) {
   final Locus locus = Locus.peek();
   ACTOR.event(
       handler,
       new SegmentRemoveEvent(
           System.currentTimeMillis(),
           locus.getServer().getMonitor(),
           locus.getServer().getId(),
           locus.execution.getMondrianStatement().getMondrianConnection().getId(),
           locus.execution.getMondrianStatement().getId(),
           locus.execution.getId(),
           this,
           star,
           header));
 }
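
A hedged usage sketch, not part of the Mondrian source shown here: because remove() resolves its monitoring context through Locus.peek(), a caller would typically invoke it inside a Locus scope, as other examples in this listing do. rolapConnection, cacheMgr, star and header are assumed (effectively final) caller-side variables; cacheMgr is assumed to be the object whose remove() appears above.

  // Sketch only: run the removal inside a Locus scope so Locus.peek() inside
  // remove() finds a valid execution context.
  Locus.execute(
      rolapConnection,
      "Example.removeSegment",
      new Locus.Action<Void>() {
        public Void execute() {
          cacheMgr.remove(star, header); // fire-and-forget; returns immediately
          return null;
        }
      });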
Example No. 2
 /**
  * Informs the cache manager that a segment load failed.
  *
  * <p>Called when a SQL statement receives an error while loading a segment.
  *
  * @param star Star
  * @param header segment header
  * @param throwable Error
  */
 public void loadFailed(RolapStar star, SegmentHeader header, Throwable throwable) {
   final Locus locus = Locus.peek();
   ACTOR.event(
       handler,
       new SegmentLoadFailedEvent(
           System.currentTimeMillis(),
           locus.getServer().getMonitor(),
           locus.getServer().getId(),
           locus.execution.getMondrianStatement().getMondrianConnection().getId(),
           locus.execution.getMondrianStatement().getId(),
           locus.execution.getId(),
           star,
           header,
           throwable));
 }
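
A hedged sketch of the call site the Javadoc describes; loadSegmentFromSql and cacheMgr are hypothetical names introduced for illustration, not Mondrian API.

  // Sketch only: report the SQL failure to the cache manager, then let the
  // error propagate to the caller.
  try {
    loadSegmentFromSql(star, header); // hypothetical loading step
  } catch (RuntimeException e) {
    cacheMgr.loadFailed(star, header, e); // asynchronous notification
    throw e;
  }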
Example No. 3
 protected GroupingSet getGroupingSet(
     final String[] tableNames,
     final String[] fieldNames,
     final String[][] fieldValues,
     final String cubeName,
     final String measure) {
   return Locus.execute(
       ((RolapConnection) getConnection()),
       "BatchTestCase.getGroupingSet",
       new Locus.Action<GroupingSet>() {
         public GroupingSet execute() {
           final RolapCube cube = getCube(cubeName);
           final BatchLoader fbcr =
               new BatchLoader(
                   Locus.peek(),
                   ((RolapConnection) getConnection())
                       .getServer()
                       .getAggregationManager()
                       .cacheMgr,
                   cube.getStar().getSqlQueryDialect(),
                   cube);
           BatchLoader.Batch batch =
               createBatch(fbcr, tableNames, fieldNames, fieldValues, cubeName, measure);
           GroupingSetsCollector collector = new GroupingSetsCollector(true);
           final List<Future<Map<Segment, SegmentWithData>>> segmentFutures =
               new ArrayList<Future<Map<Segment, SegmentWithData>>>();
           batch.loadAggregation(collector, segmentFutures);
           return collector.getGroupingSets().get(0);
         }
       });
 }
Example No. 4
 /**
  * Makes a quick request to the aggregation manager to see whether the cell value required by a
  * particular cell request is in the external cache.
  *
  * <p>'Quick' is relative. It is an asynchronous request (due to the aggregation manager being an
  * actor) and therefore somewhat slow. If the segment is in the cache, this saves batching up
  * future requests and re-executing the query. The win should be particularly noticeable for
  * queries running against a populated cache. Without this feature, every query would require at
  * least two iterations.
  *
  * <p>Request does not issue SQL to populate the segment. Nor does it try to find existing
  * segments for rollup. Those operations can wait until next phase.
  *
  * <p>Client is responsible for adding the segment to its private cache.
  *
  * @param request Cell request
  * @return Segment with data, or null if not in cache
  */
 public SegmentWithData peek(final CellRequest request) {
   final SegmentCacheManager.PeekResponse response =
       execute(new PeekCommand(request, Locus.peek()));
   for (SegmentHeader header : response.headerMap.keySet()) {
     final SegmentBody body = compositeCache.get(header);
     if (body != null) {
       final SegmentBuilder.SegmentConverter converter =
           response.converterMap.get(SegmentCacheIndexImpl.makeConverterKey(header));
       if (converter != null) {
         return converter.convert(header, body);
       }
     }
   }
   for (Map.Entry<SegmentHeader, Future<SegmentBody>> entry : response.headerMap.entrySet()) {
     final Future<SegmentBody> bodyFuture = entry.getValue();
     if (bodyFuture != null) {
       final SegmentBody body = Util.safeGet(bodyFuture, "Waiting for segment to load");
       final SegmentHeader header = entry.getKey();
       final SegmentBuilder.SegmentConverter converter =
           response.converterMap.get(SegmentCacheIndexImpl.makeConverterKey(header));
       if (converter != null) {
         return converter.convert(header, body);
       }
     }
   }
   return null;
 }
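
The last Javadoc paragraph leaves the private cache in the caller's hands. A minimal hedged sketch of that pattern follows; cellRequest, cacheMgr and localSegments are hypothetical caller-side names, and cacheMgr is assumed to be the object whose peek() is shown above.

  // Sketch only: consult the external cache first and keep whatever comes back
  // in the caller's own private cache, as the Javadoc above requires.
  final SegmentWithData segment = cacheMgr.peek(cellRequest);
  if (segment != null) {
    localSegments.add(segment); // hypothetical private cache of the client
  }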
Example No. 5
    public void run() {
      try {
        for (; ; ) {
          final Pair<Handler, Message> entry = eventQueue.take();
          final Handler handler = entry.left;
          final Message message = entry.right;
          try {
            // A message is either a command or an event.
            // A command returns a value that must be read by
            // the caller.
            if (message instanceof Command<?>) {
              Command<?> command = (Command<?>) message;
              try {
                Locus.push(command.getLocus());
                Object result = command.call();
                responseQueue.put(command, Pair.of(result, (Throwable) null));
              } catch (PleaseShutdownException e) {
                responseQueue.put(command, Pair.of(null, (Throwable) null));
                return; // exit event loop
              } catch (Throwable e) {
                responseQueue.put(command, Pair.of(null, e));
              } finally {
                Locus.pop(command.getLocus());
              }
            } else {
              Event event = (Event) message;
              event.acceptWithoutResponse(handler);

              // Broadcast the event to anyone who is interested.
              RolapUtil.MONITOR_LOGGER.debug(message);
            }
          } catch (Throwable e) {
            // REVIEW: Somewhere better to send it?
            e.printStackTrace();
          }
        }
      } catch (InterruptedException e) {
        // REVIEW: Somewhere better to send it?
        e.printStackTrace();
      } catch (Throwable e) {
        e.printStackTrace();
      }
    }
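
The loop above is the consuming half of the actor. Below is a hedged sketch of the producing half implied by the comment "a command returns a value that must be read by the caller"; the responseQueue.take(command) lookup keyed by the command is inferred from the put(command, ...) calls above and is an assumption, not verified API.

  // Sketch only: the blocking counterpart of the event loop. The caller hands
  // the command to the actor thread, then waits for its (result, error) pair.
  <T> T execute(Handler handler, Command<T> command) throws InterruptedException {
    eventQueue.put(Pair.<Handler, Message>of(handler, command));
    final Pair<Object, Throwable> response = responseQueue.take(command); // assumed keyed lookup
    if (response.right != null) {
      throw new RuntimeException(response.right);
    }
    return (T) response.left; // unchecked cast; acceptable in a sketch
  }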
Example No. 6
    public void handle(final SegmentCacheEvent e) {
      if (e.isLocal()) {
        return;
      }
      Locus.execute(
          Execution.NONE,
          "AsyncCacheListener.handle",
          new Locus.Action<Void>() {
            public Void execute() {
              final SegmentCacheManager.Command<Void> command;
              final Locus locus = Locus.peek();
              switch (e.getEventType()) {
                case ENTRY_CREATED:
                  command =
                      new Command<Void>() {
                        public Void call() {
                          cacheMgr.externalSegmentCreated(e.getSource(), server);
                          return null;
                        }

                        public Locus getLocus() {
                          return locus;
                        }
                      };
                  break;
                case ENTRY_DELETED:
                  command =
                      new Command<Void>() {
                        public Void call() {
                          cacheMgr.externalSegmentDeleted(e.getSource(), server);
                          return null;
                        }

                        public Locus getLocus() {
                          return locus;
                        }
                      };
                  break;
                default:
                  throw new UnsupportedOperationException();
              }
              cacheMgr.execute(command);
              return null;
            }
          });
    }
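
Note the design choice in this listener: the Locus is captured with Locus.peek() on the listener's thread and handed to the command through getLocus(), so the actor's event loop (Example No. 5) can push and pop that same locus around call() rather than running the command under whatever context the actor thread happens to have.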
Example No. 7
  /**
   * Checks that a given sequence of cell requests results in a particular SQL statement being
   * generated.
   *
   * <p>Always clears the cache before running the requests.
   *
   * <p>Runs the requests once for each SQL pattern in the current dialect. If there are multiple
   * patterns, runs the MDX query multiple times, and expects to see each SQL statement appear. If
   * there are no patterns in this dialect, the test trivially succeeds.
   *
   * @param requests Sequence of cell requests
   * @param patterns Set of patterns
   * @param negative Set to false in order to 'expect' a query or true to 'forbid' a query.
   */
  protected void assertRequestSql(CellRequest[] requests, SqlPattern[] patterns, boolean negative) {
    final RolapStar star = requests[0].getMeasure().getStar();
    final String cubeName = requests[0].getMeasure().getCubeName();
    final RolapCube cube = lookupCube(cubeName);
    final Dialect sqlDialect = star.getSqlQueryDialect();
    Dialect.DatabaseProduct d = sqlDialect.getDatabaseProduct();
    SqlPattern sqlPattern = SqlPattern.getPattern(d, patterns);
    if (d == Dialect.DatabaseProduct.UNKNOWN) {
      // If the dialect is not one in the pattern set, do not run the
      // test. We do not print any warning message.
      return;
    }

    boolean patternFound = false;
    for (SqlPattern pattern : patterns) {
      if (!pattern.hasDatabaseProduct(d)) {
        continue;
      }

      patternFound = true;

      clearCache(cube);

      String sql = sqlPattern.getSql();
      String trigger = sqlPattern.getTriggerSql();
      switch (d) {
        case ORACLE:
          sql = sql.replaceAll(" =as= ", " ");
          trigger = trigger.replaceAll(" =as= ", " ");
          break;
        case TERADATA:
          sql = sql.replaceAll(" =as= ", " as ");
          trigger = trigger.replaceAll(" =as= ", " as ");
          break;
      }

      // Install a hook which throws a 'bomb' if it sees a particular SQL
      // statement being executed, but otherwise lets the query run exactly
      // as it would against the current DataSource.
      RolapUtil.setHook(new TriggerHook(trigger));
      Bomb bomb;
      final Execution execution =
          new Execution(((RolapConnection) getConnection()).getInternalStatement(), 1000);
      final AggregationManager aggMgr =
          execution
              .getMondrianStatement()
              .getMondrianConnection()
              .getServer()
              .getAggregationManager();
      final Locus locus = new Locus(execution, "BatchTestCase", "BatchTestCase");
      try {
        FastBatchingCellReader fbcr =
            new FastBatchingCellReader(execution, getCube(cubeName), aggMgr);
        for (CellRequest request : requests) {
          fbcr.recordCellRequest(request);
        }
        // The FBCR will presume there is a current Locus in the stack,
        // so let's create a mock one.
        Locus.push(locus);
        fbcr.loadAggregations();
        bomb = null;
      } catch (Bomb e) {
        bomb = e;
      } finally {
        RolapUtil.setHook(null);
        Locus.pop(locus);
      }
      if (!negative && bomb == null) {
        fail("expected query [" + sql + "] did not occur");
      } else if (negative && bomb != null) {
        fail("forbidden query [" + sql + "] detected");
      }
      if (bomb != null) {
        // Only compare SQL when a bomb was thrown; in the negative case a null
        // bomb means the forbidden query never ran, which is the desired outcome.
        TestContext.assertEqualsVerbose(replaceQuotes(sql), replaceQuotes(bomb.sql));
      }
    }

    // Print warning message that no pattern was specified for the current
    // dialect.
    if (!patternFound) {
      String warnDialect = MondrianProperties.instance().WarnIfNoPatternForDialect.get();

      if (warnDialect.equals(d.toString())) {
        System.out.println(
            "[No expected SQL statements found for dialect \""
                + sqlDialect.toString()
                + "\" and test not run]");
      }
    }
  }
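
The 'bomb' mechanism used above can be pictured with a small sketch. This is not the real test-harness code: the RolapUtil.ExecuteQueryHook interface and the bomb.sql field appear elsewhere in this listing, but the matching logic here is simplified and the actual TriggerHook and Bomb definitions may differ.

  // Sketch only: abort query execution with a Bomb carrying the offending SQL
  // as soon as the trigger text is seen by the execution hook.
  static class Bomb extends RuntimeException {
    final String sql;
    Bomb(String sql) { this.sql = sql; }
  }

  static class TriggerHook implements RolapUtil.ExecuteQueryHook {
    private final String trigger;
    TriggerHook(String trigger) { this.trigger = trigger; }
    public void onExecuteQuery(String sql) {
      if (sql.contains(trigger)) {
        throw new Bomb(sql); // caught and recorded by assertRequestSql above
      }
    }
  }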
Example No. 8
  /**
   * Closes all resources (statement, result set) held by this SqlStatement.
   *
   * <p>If closing any of these resources fails, the exception is wrapped in a {@link
   * RuntimeException} describing the high-level operation which this statement was performing. No
   * further error-handling is required to produce a descriptive stack trace, unless you want to
   * absorb the error.
   */
  public void close() {
    if (haveSemaphore) {
      haveSemaphore = false;
      querySemaphore.leave();
    }

    // According to the JDBC spec, closing a statement automatically closes
    // its result sets, and closing a connection automatically closes its
    // statements. But let's be conservative and close everything
    // explicitly.
    Statement statement = null;
    SQLException ex = null;
    if (resultSet != null) {
      try {
        statement = resultSet.getStatement();
        resultSet.close();
      } catch (SQLException e) {
        ex = e;
      } finally {
        resultSet = null;
      }
    }
    if (statement != null) {
      try {
        statement.close();
      } catch (SQLException e) {
        if (ex == null) {
          // Keep the first exception rather than letting a later one mask it.
          ex = e;
        }
      }
    }
    if (jdbcConnection != null) {
      try {
        jdbcConnection.close();
      } catch (SQLException e) {
        if (ex == null) {
          ex = e;
        }
      } finally {
        jdbcConnection = null;
      }
    }

    if (ex != null) {
      throw Util.newError(ex, locus.message + "; sql=[" + sql + "]");
    }

    long endTime = System.currentTimeMillis();
    long totalMs = endTime - startTimeMillis;
    String status = ", exec+fetch " + totalMs + " ms, " + rowCount + " rows";

    locus.execution.getQueryTiming().markFull(TIMING_NAME + locus.component, totalMs);

    RolapUtil.SQL_LOGGER.debug(id + ": " + status);

    if (RolapUtil.LOGGER.isDebugEnabled()) {
      RolapUtil.LOGGER.debug(locus.component + ": done executing sql [" + sql + "]" + status);
    }

    locus
        .getServer()
        .getMonitor()
        .sendEvent(
            new SqlStatementEndEvent(endTime, id, locus, sql, getPurpose(), rowCount, false, null));
  }
Example No. 9
  /** Executes the current statement, and handles any SQLException. */
  public void execute() {
    assert state == State.FRESH : "cannot re-execute";
    state = State.ACTIVE;
    String status = "failed";
    Statement statement = null;
    try {
      this.jdbcConnection = dataSource.getConnection();
      querySemaphore.enter();
      haveSemaphore = true;
      // Trace start of execution.
      if (RolapUtil.SQL_LOGGER.isDebugEnabled()) {
        StringBuilder sqllog = new StringBuilder();
        sqllog.append(id).append(": ").append(locus.component).append(": executing sql [");
        if (sql.indexOf('\n') >= 0) {
          // SQL appears to be formatted as multiple lines. Make it
          // start on its own line.
          sqllog.append("\n");
        }
        sqllog.append(sql);
        sqllog.append(']');
        RolapUtil.SQL_LOGGER.debug(sqllog.toString());
      }

      // Execute hook.
      RolapUtil.ExecuteQueryHook hook = RolapUtil.getHook();
      if (hook != null) {
        hook.onExecuteQuery(sql);
      }
      startTimeNanos = System.nanoTime();
      startTimeMillis = System.currentTimeMillis();
      if (resultSetType < 0 || resultSetConcurrency < 0) {
        statement = jdbcConnection.createStatement();
      } else {
        statement = jdbcConnection.createStatement(resultSetType, resultSetConcurrency);
      }
      if (maxRows > 0) {
        statement.setMaxRows(maxRows);
      }

      // First make sure to register with the execution instance.
      locus.execution.registerStatement(locus, statement);

      locus
          .getServer()
          .getMonitor()
          .sendEvent(
              new SqlStatementStartEvent(
                  startTimeMillis, id, locus, sql, getPurpose(), getCellRequestCount()));

      this.resultSet = statement.executeQuery(sql);

      // skip to first row specified in request
      this.state = State.ACTIVE;
      if (firstRowOrdinal > 0) {
        if (resultSetType == ResultSet.TYPE_FORWARD_ONLY) {
          for (int i = 0; i < firstRowOrdinal; ++i) {
            if (!this.resultSet.next()) {
              this.state = State.DONE;
              break;
            }
          }
        } else {
          if (!this.resultSet.absolute(firstRowOrdinal)) {
            this.state = State.DONE;
          }
        }
      }

      long timeMillis = System.currentTimeMillis();
      long timeNanos = System.nanoTime();
      final long executeNanos = timeNanos - startTimeNanos;
      final long executeMillis = executeNanos / 1000000;
      Util.addDatabaseTime(executeMillis);
      status = ", exec " + executeMillis + " ms";

      locus
          .getServer()
          .getMonitor()
          .sendEvent(
              new SqlStatementExecuteEvent(timeMillis, id, locus, sql, getPurpose(), executeNanos));

      // Compute accessors. They ensure that we use the most efficient
      // method (e.g. getInt, getDouble, getObject) for the type of the
      // column. Even if you are going to box the result into an object,
      // it is better to use getInt than getObject; the latter might
      // return something daft like a BigDecimal (does, on the Oracle JDBC
      // driver).
      accessors.clear();
      for (Type type : guessTypes()) {
        accessors.add(createAccessor(accessors.size(), type));
      }
    } catch (Throwable e) {
      status = ", failed (" + e + ")";
      Util.close(resultSet, statement, jdbcConnection);

      if (haveSemaphore) {
        haveSemaphore = false;
        querySemaphore.leave();
      }
      if (e instanceof Error) {
        throw (Error) e;
      } else {
        throw handle(e);
      }
    } finally {
      RolapUtil.SQL_LOGGER.debug(id + ": " + status);

      if (RolapUtil.LOGGER.isDebugEnabled()) {
        RolapUtil.LOGGER.debug(locus.component + ": executing sql [" + sql + "]" + status);
      }
    }
  }
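
Taken together with Example No. 8, the lifecycle a caller would follow looks roughly like the sketch below; constructing the statement and reading rows are placeholders, since only execute() and close() are shown in this listing, and newSqlStatement() is a hypothetical factory.

  // Sketch only: execute once, read rows, and always close so that the
  // semaphore, result set, statement and connection are released and the
  // timing / monitor events from close() are emitted.
  final SqlStatement stmt = newSqlStatement(); // hypothetical factory
  stmt.execute();
  try {
    // ... read rows using the accessors computed during execute() ...
  } finally {
    stmt.close();
  }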