Code example #1
    /** Visit the specified nonterminal. */
    public void visit(NonTerminal nt) {
      Production p;

      try {
        p = analyzer.lookup(nt);
      } catch (IllegalArgumentException x) {
        // Too many productions. We assume the worst.
        isLexical = false;
        return;
      }

      if (null == p) {
        // No such production. We assume the worst.
        isLexical = false;

      } else if (analyzer.isProcessed(p.qName)) {
        // If the corresponding production has already been processed,
        // make sure it is lexical.
        if (!p.getBooleanProperty(Properties.LEXICAL)) {
          isLexical = false;
        }

      } else if (!analyzer.isBeingWorkedOn(p.qName)) {
        // The production has not been processed and is not yet under
        // consideration.  If it is text-only, accept it.  If it is void,
        // check it out.
        if (p.getBooleanProperty(Properties.TEXT_ONLY)) {
          // Nothing to do.
        } else if (AST.isVoid(p.type)) {
          dispatch(p);
        } else {
          isLexical = false;
        }
      }
    }
Code example #2
File: AggregateInfoBase.java Project: RingsC/Impala
  /**
   * Returns a tuple descriptor for the aggregation/analytic's intermediate or final result,
   * depending on whether isOutputTuple is true or false. Also updates the appropriate substitution
   * map, and creates and registers auxiliary equality predicates between the grouping slots and the
   * grouping exprs.
   */
  private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple) {
    TupleDescriptor result =
        analyzer
            .getDescTbl()
            .createTupleDescriptor(tupleDebugName() + (isOutputTuple ? "-out" : "-intermed"));
    List<Expr> exprs =
        Lists.newArrayListWithCapacity(groupingExprs_.size() + aggregateExprs_.size());
    exprs.addAll(groupingExprs_);
    exprs.addAll(aggregateExprs_);

    int aggregateExprStartIndex = groupingExprs_.size();
    for (int i = 0; i < exprs.size(); ++i) {
      Expr expr = exprs.get(i);
      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(result);
      slotDesc.setLabel(expr.toSql());
      slotDesc.setStats(ColumnStats.fromExpr(expr));
      Preconditions.checkState(expr.getType().isValid());
      slotDesc.setType(expr.getType());
      if (i < aggregateExprStartIndex) {
        // register equivalence between grouping slot and grouping expr;
        // do this only when the grouping expr isn't a constant, otherwise
        // it'll simply show up as a gratuitous HAVING predicate
        // (which would actually be incorrect if the constant happens to be NULL)
        if (!expr.isConstant()) {
          analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
        }
      } else {
        Preconditions.checkArgument(expr instanceof FunctionCallExpr);
        FunctionCallExpr aggExpr = (FunctionCallExpr) expr;
        if (aggExpr.isMergeAggFn()) {
          slotDesc.setLabel(aggExpr.getChild(0).toSql());
        } else {
          slotDesc.setLabel(aggExpr.toSql());
        }

        // count(*) is non-nullable.
        if (aggExpr.getFnName().getFunction().equals("count")) {
          // TODO: Consider making nullability a property of types or of builtin agg fns.
          // row_number, rank, and dense_rank are non-nullable as well.
          slotDesc.setIsNullable(false);
        }
        if (!isOutputTuple) {
          Type intermediateType = ((AggregateFunction) aggExpr.fn_).getIntermediateType();
          if (intermediateType != null) {
            // Use the output type as intermediate if the function has a wildcard decimal.
            if (!intermediateType.isWildcardDecimal()) {
              slotDesc.setType(intermediateType);
            } else {
              Preconditions.checkState(expr.getType().isDecimal());
            }
          }
        }
      }
    }
    String prefix = (isOutputTuple ? "result" : "intermediate");
    LOG.trace(prefix + " tuple=" + result.debugString());
    return result;
  }
Code example #3
  public static int[] classify(String[] data, String[] locations) {
    ArrayList<Entry> entries = new ArrayList<Entry>();
    for (String datum : data) {
      entries.add(new Entry(datum));
    }
    Analyzer analyzer = new Analyzer(entries);

    return analyzer.classify(new Constants());
  }
Code example #4
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
  public void testChangeGaps() throws Exception {
    // LUCENE-5324: check that it is possible to change the wrapper's gaps
    final int positionGap = random().nextInt(1000);
    final int offsetGap = random().nextInt(1000);
    final Analyzer delegate = new MockAnalyzer(random());
    final Analyzer a =
        new DelegatingAnalyzerWrapper(delegate.getReuseStrategy()) {
          @Override
          protected Analyzer getWrappedAnalyzer(String fieldName) {
            return delegate;
          }

          @Override
          public int getPositionIncrementGap(String fieldName) {
            return positionGap;
          }

          @Override
          public int getOffsetGap(String fieldName) {
            return offsetGap;
          }
        };

    final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a);
    final Document doc = new Document();
    final FieldType ft = new FieldType();
    ft.setIndexOptions(IndexOptions.DOCS);
    ft.setTokenized(true);
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorOffsets(true);
    doc.add(new Field("f", "a", ft));
    doc.add(new Field("f", "a", ft));
    writer.addDocument(doc);
    final LeafReader reader = getOnlySegmentReader(writer.getReader());
    final Fields fields = reader.getTermVectors(0);
    final Terms terms = fields.terms("f");
    final TermsEnum te = terms.iterator();
    assertEquals(new BytesRef("a"), te.next());
    final PostingsEnum dpe = te.postings(null, PostingsEnum.ALL);
    assertEquals(0, dpe.nextDoc());
    assertEquals(2, dpe.freq());
    assertEquals(0, dpe.nextPosition());
    assertEquals(0, dpe.startOffset());
    final int endOffset = dpe.endOffset();
    assertEquals(1 + positionGap, dpe.nextPosition());
    assertEquals(1 + endOffset + offsetGap, dpe.endOffset());
    assertEquals(null, te.next());
    reader.close();
    writer.close();
    writer.w.getDirectory().close();
  }
Code example #5
File: QueryExplainer.java Project: Ravion/Presto-1
  private Plan getLogicalPlan(Statement statement) {
    // analyze statement
    Analyzer analyzer =
        new Analyzer(session, metadata, Optional.of(this), experimentalSyntaxEnabled);

    Analysis analysis = analyzer.analyze(statement);
    PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();

    // plan statement
    LogicalPlanner logicalPlanner =
        new LogicalPlanner(session, planOptimizers, idAllocator, metadata);
    return logicalPlanner.plan(analysis);
  }
Code example #6
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
  public void testLUCENE_3042() throws Exception {
    String testString = "t";

    Analyzer analyzer = new MockAnalyzer(random());
    try (TokenStream stream = analyzer.tokenStream("dummy", testString)) {
      stream.reset();
      while (stream.incrementToken()) {
        // consume
      }
      stream.end();
    }

    assertAnalyzesTo(analyzer, testString, new String[] {"t"});
  }
Code example #7
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
 /** Test a configuration where three characters make a term */
 public void testThreeChars() throws Exception {
   CharacterRunAutomaton single = new CharacterRunAutomaton(new RegExp("...").toAutomaton());
   Analyzer a = new MockAnalyzer(random(), single, false);
   assertAnalyzesTo(a, "foobar", new String[] {"foo", "bar"}, new int[] {0, 3}, new int[] {3, 6});
   // make sure when last term is a "partial" match that end() is correct
   assertTokenStreamContents(
       a.tokenStream("bogus", "fooba"),
       new String[] {"foo"},
       new int[] {0},
       new int[] {3},
       new int[] {1},
       new Integer(5));
   checkRandomData(random(), a, 100);
 }
Code example #8
File: QueryExplainer.java Project: Ravion/Presto-1
  private SubPlan getDistributedPlan(Statement statement) {
    // analyze statement
    Analyzer analyzer =
        new Analyzer(session, metadata, Optional.of(this), experimentalSyntaxEnabled);

    Analysis analysis = analyzer.analyze(statement);
    PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();

    // plan statement
    LogicalPlanner logicalPlanner =
        new LogicalPlanner(session, planOptimizers, idAllocator, metadata);
    Plan plan = logicalPlanner.plan(analysis);

    return new DistributedLogicalPlanner(metadata, idAllocator).createSubPlans(plan, false);
  }
Code example #9
File: SelectStmt.java Project: vthacker/impala-port
 /**
  * Expand "<tbl>.*" select list item.
  *
  * @param analyzer the analyzer in which the table's tuple descriptor is registered
  * @param tblName the table whose columns are expanded
  * @throws AnalysisException if no descriptor is registered for the table
  */
 private void expandStar(Analyzer analyzer, TableName tblName) throws AnalysisException {
   TupleDescriptor d = analyzer.getDescriptor(tblName);
   if (d == null) {
     throw new AnalysisException("unknown table: " + tblName.toString());
   }
   expandStar(analyzer, tblName.toString(), d);
 }
Code example #10
File: SortInfo.java Project: jackerxff/Impala-1
 /**
  * Materializes the slots in sortTupleDesc_ referenced in the ordering exprs. Materializes the
  * slots referenced by the corresponding sortTupleSlotExpr after applying the 'smap'.
  */
 public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap)
     throws InternalException {
   Preconditions.checkNotNull(sortTupleDesc_);
   Preconditions.checkNotNull(sortTupleSlotExprs_);
   Preconditions.checkState(sortTupleDesc_.getIsMaterialized());
   analyzer.materializeSlots(orderingExprs_);
   List<SlotDescriptor> sortTupleSlotDescs = sortTupleDesc_.getSlots();
   List<Expr> materializedExprs = Lists.newArrayList();
   for (int i = 0; i < sortTupleSlotDescs.size(); ++i) {
     if (sortTupleSlotDescs.get(i).isMaterialized()) {
       materializedExprs.add(sortTupleSlotExprs_.get(i));
     }
   }
   List<Expr> substMaterializedExprs = Expr.substituteList(materializedExprs, smap, analyzer);
   analyzer.materializeSlots(substMaterializedExprs);
 }
Code example #11
File: AnalyzerTest.java Project: jmisur/jacoco
 @Test
 public void testAnalyzeAll4() throws IOException {
   createClassfile("bin1", AnalyzerTest.class);
   final int count = analyzer.analyzeAll(folder.getRoot());
   assertEquals(1, count);
   assertEquals(Collections.singleton("org/jacoco/core/analysis/AnalyzerTest"), classes);
 }
Code example #12
File: RunAgain.java Project: matsim-org/matsim
  public static void main(String[] args) throws IOException {

    Config c = ConfigUtils.createConfig();
    ConfigUtils.loadConfig(c, RAW_INPUT + "output_config.xml.gz");

    Scenario sc = ScenarioUtils.createScenario(c);
    //		Network net = sc.getNetwork();
    new MatsimNetworkReader(sc.getNetwork()).readFile(RAW_INPUT + "/output_network.xml.gz");
    new PopulationReader(sc).readFile(RAW_INPUT + "output_plans.xml.gz");
    //		dropDepTimes(sc.getPopulation());

    c.controler().setOutputDirectory(NEW_DIR + "output/");
    c.network().setInputFile(NEW_DIR + "input/network.xml.gz");
    c.plans().setInputFile(NEW_DIR + "input/population.xml.gz");

    new NetworkWriter(sc.getNetwork()).write(c.network().getInputFile());
    new PopulationWriter(sc.getPopulation(), sc.getNetwork()).write(c.plans().getInputFile());
    new ConfigWriter(c).write(NEW_DIR + "input/config.xml");

    c.controler().setLastIteration(0);

    Controler cntr = new Controler(sc);
    cntr.getConfig()
        .controler()
        .setOverwriteFileSetting(
            OutputDirectoryHierarchy.OverwriteFileSetting.overwriteExistingFiles);
    cntr.run();

    Analyzer.main(
        new String[] {
          NEW_DIR + "/output/ITERS/it.0/0.events.xml.gz",
          "/Users/laemmel/arbeit/papers/2015/TRBwFZJ/hybridsim_trb2016/analysis/runagain-vehicles_plot_data"
        });
  }
Code example #13
File: Main.java Project: darzul/rdn
  private static void prepare() {
    List<String[]> data = new LinkedList<>();
    data.addAll(Parser.parse("/home/desktop/Documents/Git/fann/project/garcon.csv", ";"));
    data.addAll(Parser.parse("/home/desktop/Documents/Git/fann/project/fille.csv", ";"));
    Person[] persons = Person.buildFrom(data);

    Shuffler<Person> shuffler = new Shuffler<>();
    shuffler.shuffle(persons);

    String[] names = new String[persons.length];
    for (int line = 0; line < persons.length; line++) {
      Person person = persons[line];
      names[line] = person.getName();
    }

    char[] charset = {
      'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
      'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    };
    int[][] charsFrequency = Analyzer.computeCharsFrequency(names, charset);
    float[][] charsFrequencyNormalized = Analyzer.normalize(charsFrequency, 0, 1);
    //        float[][] firstChar = Analyzer.computeFirstChar(names, charset);
    //        float[][] lastChar = Analyzer.computeLastChar(names, charset);
    //        float[][] twoLastCharsWithoutOrder = Analyzer.computeTwoLastCharWithoutOrder(names, charset);
    float[][] twoLastCharsWithOrder = Analyzer.computeTwoLastCharWithOrder(names, charset);
    for (int line = 0; line < persons.length; line++) {
      Person person = persons[line];
      person.addInputs(charsFrequencyNormalized[line]);
      //            person.addInputs(firstChar[line]);
      //            person.addInputs(lastChar[line]);
      //            person.addInputs(twoLastCharsWithoutOrder[line]);
      person.addInputs(twoLastCharsWithOrder[line]);
    }

    Splitter<Person> splitter = new Splitter<>();
    List<Person[]> bases = splitter.split(Person.class, persons, 0.2f);

    Person[] trainingBase = bases.get(0);
    Person[] testingBase = bases.get(1);

    Converter.toFile(
        "/home/desktop/Documents/Git/fann/project/base/training_base.txt", trainingBase, ' ');
    Converter.toFile(
        "/home/desktop/Documents/Git/fann/project/base/testing_base.txt", testingBase, ' ');
  }
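The feature-extraction helpers used here (Analyzer.computeCharsFrequency, Analyzer.normalize, and the two-last-chars variants) are project-specific and not shown. As a rough sketch of the kind of matrix the first one plausibly produces, one row per name and one column per charset character, consider the following; this is an assumption for illustration, not the project's actual implementation.

  // Hypothetical stand-in for Analyzer.computeCharsFrequency: counts, for each
  // name, how often each character of the charset occurs (case-insensitive).
  static int[][] computeCharsFrequency(String[] names, char[] charset) {
    int[][] freq = new int[names.length][charset.length];
    for (int i = 0; i < names.length; i++) {
      String upper = names[i].toUpperCase();
      for (int j = 0; j < charset.length; j++) {
        for (int k = 0; k < upper.length(); k++) {
          if (upper.charAt(k) == charset[j]) {
            freq[i][j]++;
          }
        }
      }
    }
    return freq;
  }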
Code example #14
File: SelectStmt.java Project: nealsid/Impala
 /** Expand "<tbl>.*" select list item. */
 private void expandStar(Analyzer analyzer, TableName tblName)
     throws AnalysisException, AuthorizationException {
   TupleDescriptor tupleDesc = analyzer.getDescriptor(tblName);
   if (tupleDesc == null) {
     throw new AnalysisException("unknown table: " + tblName.toString());
   }
   expandStar(analyzer, tblName, tupleDesc);
 }
Code example #15
 @Override
 protected TokenStreamComponents createComponents(String fieldName) {
   TokenStreamComponents createComponents = analyzer.createComponents(fieldName);
   TokenStream stream =
       new PrefixTokenFilter(createComponents.getTokenStream(), separator, prefix);
   TokenStreamComponents tsc = new TokenStreamComponents(createComponents.getTokenizer(), stream);
   return tsc;
 }
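This override rewraps a delegate analyzer's token stream with a PrefixTokenFilter (a project-specific filter, not part of Lucene). A conventional way to package such an override is Lucene's AnalyzerWrapper, whose wrapComponents hook exists for exactly this kind of rewrapping; the sketch below assumes the same PrefixTokenFilter and is not the snippet's original enclosing class.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.analysis.TokenStream;

public final class PrefixingAnalyzer extends AnalyzerWrapper {
  private final Analyzer delegate;
  private final char separator;
  private final String prefix;

  public PrefixingAnalyzer(Analyzer delegate, char separator, String prefix) {
    super(delegate.getReuseStrategy());
    this.delegate = delegate;
    this.separator = separator;
    this.prefix = prefix;
  }

  @Override
  protected Analyzer getWrappedAnalyzer(String fieldName) {
    return delegate;
  }

  @Override
  protected TokenStreamComponents wrapComponents(
      String fieldName, TokenStreamComponents components) {
    // Rewrap only the filter chain; the tokenizer is reused as-is.
    // PrefixTokenFilter is the project-specific filter from the snippet above.
    TokenStream filtered = new PrefixTokenFilter(components.getTokenStream(), separator, prefix);
    return new TokenStreamComponents(components.getTokenizer(), filtered);
  }
}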
Code example #16
File: AnalyzerTest.java Project: jmisur/jacoco
 @Test(expected = IOException.class)
 public void testAnalyzeAll6() throws IOException {
   File file = new File(folder.getRoot(), "broken.zip");
   OutputStream out = new FileOutputStream(file);
   ZipOutputStream zip = new ZipOutputStream(out);
   zip.putNextEntry(new ZipEntry("brokenentry.txt"));
   out.write(0x23); // Unexpected data here
   zip.close();
   analyzer.analyzeAll(file);
 }
Code example #17
    /** Visit the specified grammar. */
    public void visit(Module m) {
      // Initialize the per-grammar state.
      analyzer.register(this);
      analyzer.init(m);

      // Process the productions.
      for (Production p : m.productions) {
        // Make sure that the production has not been processed
        // already and that it returns a string.
        if (analyzer.isProcessed(p.qName)) {
          continue;
        } else if (p.getBooleanProperty(Properties.TEXT_ONLY)) {
          mark(p);
          analyzer.processed(p.qName);
          continue;
        } else if (!AST.isVoid(p.type)) {
          analyzer.processed(p.qName);
          continue;
        }

        // Clear the per-production state.
        isLexical = true;

        // Process the production.
        analyzer.process(p);

        // Tabulate the results.
        if (isLexical) {
          // All visited productions are guaranteed to be lexical.
          for (NonTerminal nt : analyzer.working()) {
            // This lookup is guaranteed to work, as the production's
            // fully qualified name was added by visit(Production).
            Production p2 = analyzer.lookup(nt);
            mark(p2);
            analyzer.processed(p2.qName);
          }

        } else {
          // We only know that the current production is not lexical.
          analyzer.processed(p.qName);
        }
      }
    }
Code example #18
File: SelectStmt.java Project: nealsid/Impala
  /** Marks all unassigned join predicates as well as exprs in aggInfo and sortInfo. */
  @Override
  public void materializeRequiredSlots(Analyzer analyzer) {
    // Mark unassigned join predicates. Some predicates that must be evaluated by a join
    // can also be safely evaluated below the join (picked up by getBoundPredicates()).
    // Such predicates will be marked twice and that is ok.
    List<Expr> unassigned = analyzer.getUnassignedConjuncts(getTableRefIds(), true);
    List<Expr> unassignedJoinConjuncts = Lists.newArrayList();
    for (Expr e : unassigned) {
      if (analyzer.evalByJoin(e)) unassignedJoinConjuncts.add(e);
    }
    List<Expr> baseTblJoinConjuncts = Expr.cloneList(unassignedJoinConjuncts, baseTblSmap_);
    materializeSlots(analyzer, baseTblJoinConjuncts);

    if (sortInfo_ != null) {
      // mark ordering exprs before marking agg exprs because the ordering exprs
      // may contain agg exprs that are not referenced anywhere but the ORDER BY clause
      List<Expr> resolvedExprs = Expr.cloneList(sortInfo_.getOrderingExprs(), baseTblSmap_);
      materializeSlots(analyzer, resolvedExprs);
    }

    if (aggInfo_ != null) {
      // mark all agg exprs needed for HAVING pred and binding predicates as materialized
      // before calling AggregateInfo.materializeRequiredSlots(), otherwise they won't
      // show up in AggregateInfo.getMaterializedAggregateExprs()
      ArrayList<Expr> havingConjuncts = Lists.newArrayList();
      if (havingPred_ != null) havingConjuncts.add(havingPred_);
      // Ignore predicates bound to a group-by slot because those
      // are already evaluated below this agg node (e.g., in a scan).
      Set<SlotId> groupBySlots = Sets.newHashSet();
      for (int i = 0; i < aggInfo_.getGroupingExprs().size(); ++i) {
        groupBySlots.add(aggInfo_.getAggTupleDesc().getSlots().get(i).getId());
      }
      // Binding predicates are assigned to the final output tuple of the aggregation,
      // which is the tuple of the 2nd phase agg for distinct aggs.
      ArrayList<Expr> bindingPredicates =
          analyzer.getBoundPredicates(aggInfo_.getOutputTupleId(), groupBySlots);
      havingConjuncts.addAll(bindingPredicates);
      havingConjuncts.addAll(
          analyzer.getUnassignedConjuncts(aggInfo_.getOutputTupleId().asList(), false));
      materializeSlots(analyzer, havingConjuncts);
      aggInfo_.materializeRequiredSlots(analyzer, baseTblSmap_);
    }
  }
Code example #19
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
 /** blast some random strings through differently configured tokenizers */
 public void testRandomRegexps() throws Exception {
   int iters = TEST_NIGHTLY ? atLeast(30) : atLeast(1);
   for (int i = 0; i < iters; i++) {
     final CharacterRunAutomaton dfa =
         new CharacterRunAutomaton(AutomatonTestUtil.randomAutomaton(random()), Integer.MAX_VALUE);
     final boolean lowercase = random().nextBoolean();
     final int limit = TestUtil.nextInt(random(), 0, 500);
     Analyzer a =
         new Analyzer() {
           @Override
           protected TokenStreamComponents createComponents(String fieldName) {
             Tokenizer t = new MockTokenizer(dfa, lowercase, limit);
             return new TokenStreamComponents(t, t);
           }
         };
     checkRandomData(random(), a, 100);
     a.close();
   }
 }
Code example #20
File: SelectStmt.java Project: vthacker/impala-port
  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException, InternalException {
    // start out with table refs to establish aliases
    TableRef leftTblRef = null; // the one to the left of tblRef
    for (TableRef tblRef : tableRefs) {
      tblRef.setLeftTblRef(leftTblRef);
      tblRef.analyze(analyzer);
      leftTblRef = tblRef;
    }

    // populate selectListExprs, aliasSMap, and colNames
    for (SelectListItem item : selectList.getItems()) {
      if (item.isStar()) {
        TableName tblName = item.getTblName();
        if (tblName == null) {
          expandStar(analyzer);
        } else {
          expandStar(analyzer, tblName);
        }
      } else {
        resultExprs.add(item.getExpr());
        SlotRef aliasRef = new SlotRef(null, item.toColumnLabel());
        if (aliasSMap.lhs.contains(aliasRef)) {
          // If we have already seen this alias, it refers to more than one column and
          // therefore is ambiguous.
          ambiguousAliasList.add(aliasRef);
        }
        aliasSMap.lhs.add(aliasRef);
        aliasSMap.rhs.add(item.getExpr().clone(null));
        colLabels.add(item.toColumnLabel());
      }
    }

    // analyze selectListExprs
    Expr.analyze(resultExprs, analyzer);

    if (whereClause != null) {
      whereClause.analyze(analyzer);
      if (whereClause.contains(AggregateExpr.class)) {
        throw new AnalysisException("aggregation function not allowed in WHERE clause");
      }
      whereClause.checkReturnsBool("WHERE clause", false);
      analyzer.registerConjuncts(whereClause, null, true);
    }

    createSortInfo(analyzer);
    analyzeAggregation(analyzer);

    // Substitute expressions to the underlying inline view expressions
    substituteInlineViewExprs(analyzer);

    if (aggInfo != null) {
      LOG.debug("post-analysis " + aggInfo.debugString());
    }
  }
Code example #21
File: BaseTableRef.java Project: hmncyj/Impala
 /** Register this table ref and then analyze the Join clause. */
 @Override
 public void analyze(Analyzer analyzer) throws AnalysisException, AuthorizationException {
   Preconditions.checkNotNull(getPrivilegeRequirement());
   desc_ = analyzer.registerBaseTableRef(this);
    isAnalyzed_ = true; // at this point desc_ has been assigned
   try {
     analyzeJoin(analyzer);
   } catch (InternalException e) {
     throw new AnalysisException(e.getMessage(), e);
   }
 }
Code example #22
File: AnalyzerTest.java Project: jmisur/jacoco
 @Test
 public void testAnalyzeAll2() throws IOException {
   final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
   final ZipOutputStream zip = new ZipOutputStream(buffer);
   zip.putNextEntry(new ZipEntry("org/jacoco/core/analysis/AnalyzerTest.class"));
   zip.write(TargetLoader.getClassDataAsBytes(AnalyzerTest.class));
   zip.finish();
   final int count = analyzer.analyzeAll(new ByteArrayInputStream(buffer.toByteArray()));
   assertEquals(1, count);
   assertEquals(Collections.singleton("org/jacoco/core/analysis/AnalyzerTest"), classes);
 }
Code example #23
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
  public void testWrapReader() throws Exception {
    // LUCENE-5153: test that wrapping an analyzer's reader is allowed
    final Random random = random();

    final Analyzer delegate = new MockAnalyzer(random);
    Analyzer a =
        new AnalyzerWrapper(delegate.getReuseStrategy()) {

          @Override
          protected Reader wrapReader(String fieldName, Reader reader) {
            return new MockCharFilter(reader, 7);
          }

          @Override
          protected Analyzer getWrappedAnalyzer(String fieldName) {
            return delegate;
          }
        };

    checkOneTerm(a, "abc", "aabc");
  }
Code example #24
  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException, AuthorizationException {
    // For now, if authorization is enabled, the user needs ALL on the server
    // to create functions.
    // TODO: this is not the right granularity but acceptable for now.
    analyzer.getCatalog().checkCreateDropFunctionAccess(analyzer.getUser());

    // Validate function name is legal
    fn_.getFunctionName().analyze(analyzer);

    // Validate DB is legal
    String dbName = analyzer.getTargetDbName(fn_.getFunctionName());
    fn_.getFunctionName().setDb(dbName);
    if (analyzer.getCatalog().getDb(dbName, analyzer.getUser(), Privilege.CREATE) == null) {
      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + dbName);
    }
    Function existingFn =
        analyzer.getCatalog().getFunction(fn_, Function.CompareMode.IS_INDISTINGUISHABLE);
    if (existingFn != null && !ifNotExists_) {
      throw new AnalysisException(
          Analyzer.FN_ALREADY_EXISTS_ERROR_MSG + existingFn.signatureString());
    }

    fn_.getLocation().analyze(analyzer, Privilege.CREATE);

    // Check the file type from the binary type to infer the type of the UDA
    fn_.setBinaryType(getBinaryType());
  }
Code example #25
File: SelectStmt.java Project: nealsid/Impala
 /**
  * Replaces BaseTableRefs in tblRefs whose alias matches a view registered in the given analyzer
  * or its parent analyzers with a clone of the matching inline view. The cloned inline view
  * inherits the context-dependent attributes such as the on-clause, join hints, etc. from the
  * original BaseTableRef.
  *
  * <p>Matches views from the inside out, i.e., we first look in this analyzer then in the
  * parentAnalyzer then and its parent, etc., and finally consult the catalog for matching views
  * (the global scope).
  *
  * <p>This method is used for substituting views from WITH clauses and views from the catalog.
  */
 public void substituteViews(Analyzer analyzer, List<TableRef> tblRefs)
     throws AuthorizationException, AnalysisException {
   for (int i = 0; i < tblRefs.size(); ++i) {
     if (!(tblRefs.get(i) instanceof BaseTableRef)) continue;
     BaseTableRef tblRef = (BaseTableRef) tblRefs.get(i);
     ViewRef viewDefinition = analyzer.findViewDefinition(tblRef, true);
     if (viewDefinition == null) continue;
     // Instantiate the view to replace the original BaseTableRef.
     ViewRef viewRef = viewDefinition.instantiate(tblRef);
     viewRef.getViewStmt().setIsExplain(isExplain_);
     tblRefs.set(i, viewRef);
   }
 }
Code example #26
File: AnalyzerTest.java Project: jmisur/jacoco
 @Test
 public void testAnalyzeAll5() throws IOException {
   createClassfile("bin1", Analyzer.class);
   createClassfile("bin2", AnalyzerTest.class);
   String path = "bin1" + File.pathSeparator + "bin2";
   final int count = analyzer.analyzeAll(path, folder.getRoot());
   assertEquals(2, count);
   assertEquals(
       new HashSet<String>(
           Arrays.asList(
               "org/jacoco/core/analysis/Analyzer", "org/jacoco/core/analysis/AnalyzerTest")),
       classes);
 }
Code example #27
File: SlotRef.java Project: nimbusrepo/impala
 @Override
 public void analyze(Analyzer analyzer) throws AnalysisException {
   if (isAnalyzed) {
     return;
   }
   super.analyze(analyzer);
   desc = analyzer.registerColumnRef(tblName, col);
   type = desc.getType();
   if (!type.isSupported()) {
     throw new AnalysisException(
         "Unsupported type '" + type.toString() + "' in '" + toSql() + "'.");
   }
   numDistinctValues = desc.getStats().getNumDistinctValues();
 }
Code example #28
File: TestMockAnalyzer.java Project: PATRIC3/p3_solr
  /** Test MockTokenizer encountering a too long token */
  public void testTooLongToken() throws Exception {
    Analyzer whitespace =
        new Analyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false, 5);
            return new TokenStreamComponents(t, t);
          }
        };

    assertTokenStreamContents(
        whitespace.tokenStream("bogus", "test 123 toolong ok "),
        new String[] {"test", "123", "toolo", "ng", "ok"},
        new int[] {0, 5, 9, 14, 17},
        new int[] {4, 8, 14, 16, 19},
        new Integer(20));

    assertTokenStreamContents(
        whitespace.tokenStream("bogus", "test 123 toolo"),
        new String[] {"test", "123", "toolo"},
        new int[] {0, 5, 9},
        new int[] {4, 8, 14},
        new Integer(14));
  }
Code example #29
  public void assertThreadSafe(final Analyzer analyzer) throws Exception {
    int numTestPoints = 100;
    int numThreads = _TestUtil.nextInt(random, 3, 5);
    final HashMap<String, String> map = new HashMap<String, String>();

    // create a map<String,String> of terms to their analyzed forms up front.
    // then with multiple threads, re-analyze all the keys in the map
    // and ensure they are the same as the ones we produced in serial fashion.

    for (int i = 0; i < numTestPoints; i++) {
      String term = _TestUtil.randomSimpleString(random);
      TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
      CharTermAttribute encodedBytes = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      assertTrue(ts.incrementToken());
      // ensure we make a copy of the actual bytes too
      map.put(term, encodedBytes.toString());
    }

    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      threads[i] =
          new Thread() {
            @Override
            public void run() {
              try {
                for (Map.Entry<String, String> mapping : map.entrySet()) {
                  String term = mapping.getKey();
                  String expected = mapping.getValue();
                  TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
                  CharTermAttribute encodedBytes = ts.addAttribute(CharTermAttribute.class);
                  ts.reset();
                  assertTrue(ts.incrementToken());
                  assertEquals(expected, encodedBytes.toString());
                }
              } catch (IOException e) {
                throw new RuntimeException(e);
              }
            }
          };
    }
    for (int i = 0; i < numThreads; i++) {
      threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
      threads[i].join();
    }
  }
Code example #30
  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException {
    if (isAnalyzed_) return;
    super.analyze(analyzer);
    analyzer.castAllToCompatibleType(originalChildren_);

    // TODO: improve with histograms
    selectivity_ = Expr.DEFAULT_SELECTIVITY;

    // Rewrite between predicate into a conjunctive/disjunctive compound predicate.
    if (isNotBetween_) {
      // Rewrite into disjunction.
      Predicate lower =
          new BinaryPredicate(
              BinaryPredicate.Operator.LT, originalChildren_.get(0), originalChildren_.get(1));
      Predicate upper =
          new BinaryPredicate(
              BinaryPredicate.Operator.GT, originalChildren_.get(0), originalChildren_.get(2));
      rewrittenPredicate_ = new CompoundPredicate(CompoundPredicate.Operator.OR, lower, upper);
    } else {
      // Rewrite into conjunction.
      Predicate lower =
          new BinaryPredicate(
              BinaryPredicate.Operator.GE, originalChildren_.get(0), originalChildren_.get(1));
      Predicate upper =
          new BinaryPredicate(
              BinaryPredicate.Operator.LE, originalChildren_.get(0), originalChildren_.get(2));
      rewrittenPredicate_ = new CompoundPredicate(CompoundPredicate.Operator.AND, lower, upper);
    }

    try {
      rewrittenPredicate_.analyze(analyzer);
      fn_ = rewrittenPredicate_.fn_;
    } catch (AnalysisException e) {
      // We should have already guaranteed that analysis will succeed.
      Preconditions.checkState(false, "Analysis failed in rewritten between predicate");
    }

    // Make sure toThrift() picks up the children of the rewritten predicate.
    children_ = rewrittenPredicate_.getChildren();
  }
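The rewrite above relies on the standard equivalences: a BETWEEN lo AND hi becomes (a >= lo) AND (a <= hi), while a NOT BETWEEN lo AND hi becomes (a < lo) OR (a > hi). Here is a tiny self-contained demonstration of that symmetry on plain int values; it illustrates the logic only and is not Impala code.

public final class BetweenRewriteDemo {
  // a BETWEEN lo AND hi  ==  a >= lo AND a <= hi
  static boolean between(int a, int lo, int hi) {
    return a >= lo && a <= hi;
  }

  // a NOT BETWEEN lo AND hi  ==  a < lo OR a > hi
  static boolean notBetween(int a, int lo, int hi) {
    return a < lo || a > hi;
  }

  public static void main(String[] args) {
    System.out.println(between(5, 1, 10));     // true
    System.out.println(notBetween(0, 1, 10));  // true
    System.out.println(notBetween(5, 1, 10));  // false
  }
}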