Example #1
0
  /**
   * Creates (or replaces the url list of) a StreamAggregation keyed by {@code aggName}.
   *
   * <p>If an aggregation with this name already exists it must belong to the same
   * schema/group; otherwise a RuntimeException is thrown. The existing url list is
   * fully replaced by {@code urls}.
   *
   * @param aggName unique aggregation name (entity key)
   * @param schemaName name of the SecureSchema (group) the aggregation belongs to
   * @param urls stream urls to aggregate; replaces any previously stored urls
   * @throws RuntimeException if the name is taken by another group, or the schema
   *     does not exist
   */
  public static void createAggregation(String aggName, String schemaName, String... urls) {
    StreamAggregation s = NoSql.em().find(StreamAggregation.class, aggName);
    if (s == null) {
      s = new StreamAggregation();
    } else {
      // Guard against a null schema on a pre-existing row (would NPE below), and
      // report the group the name is ACTUALLY in use under, not the requested one
      // (the original message printed the caller's schemaName, which is misleading).
      String existingGroup = s.getSchema() == null ? null : s.getSchema().getName();
      if (existingGroup == null || !existingGroup.equals(schemaName))
        throw new RuntimeException(
            "Aggregation name=" + aggName + " is already in use under group=" + existingGroup);
    }

    SecureSchema schema = SecureSchema.findByName(NoSql.em(), schemaName);
    // Fail fast with a clear message instead of NPE-ing later in put(schema).
    if (schema == null)
      throw new RuntimeException("Schema/group=" + schemaName + " does not exist");

    s.setName(aggName);
    // Replace (not append to) the previously stored url list.
    List<String> streamUrls = s.getUrls();
    streamUrls.clear();

    for (String url : urls) {
      streamUrls.add(url);
    }

    s.setSchema(schema);
    NoSql.em().put(s);
    NoSql.em().put(schema);
    NoSql.em().flush();
  }
Example #2
0
 /**
  * Re-indexes every SecureSchema currently stored in the database into the
  * search engine.
  *
  * @throws IOException on solr I/O failure
  * @throws SAXException on solr config parse failure
  * @throws ParserConfigurationException on solr config parse failure
  * @throws SolrServerException on solr server failure
  */
 public static void indexSchemas()
     throws IOException, SAXException, ParserConfigurationException, SolrServerException {
   for (SecureSchema currentSchema : SecureSchema.findAll(NoSql.em())) {
     SearchUtils.indexSchema(currentSchema);
   }
 }
Example #3
0
  /**
   * Attaches a chart to the current user's dashboard and redirects to the
   * charts settings page.
   *
   * @param chartId id of an existing chart definition
   * @param encoded encoded chart variables to store alongside the reference
   */
  public static void addChartToDashboard(String chartId, String encoded) {
    // Validate the referenced chart exists before attaching it.
    ChartUtil.fetchChart(chartId);

    ChartDbo dashboardChart = new ChartDbo();
    dashboardChart.setChartId(chartId);
    dashboardChart.setEncodedVariables(encoded);
    // The same chart may be added multiple times, so suffix the current time to
    // keep ids unique — a human cannot add charts fast enough to collide.
    dashboardChart.setId(chartId + System.currentTimeMillis());

    EntityUser currentUser = Utility.getCurrentUser(session);
    List<ChartDbo> userCharts = currentUser.getCharts();
    userCharts.add(dashboardChart);
    NoSql.em().put(currentUser);
    NoSql.em().flush();

    Settings.charts();
  }
Example #4
0
  /**
   * Drops all existing solr cores and rebuilds the entire search index from the
   * database: schemas, table metadata, and (for searchable tables) every data row.
   *
   * <p>Corrupt rows/indexes are logged and skipped so one bad table cannot abort
   * the whole reindex.
   *
   * @throws IOException on solr I/O failure
   * @throws SAXException on solr config parse failure
   * @throws ParserConfigurationException on solr config parse failure
   * @throws SolrServerException on solr server failure
   */
  @Util
  public static void reindex()
      throws IOException, SAXException, ParserConfigurationException, SolrServerException {

    deleteExistingCores();
    indexSchemas();

    Cursor<KeyValue<SecureTable>> tablesCursor = SecureTable.findAllCursor(NoSql.em());
    int i = 0;
    // very important to ignore any error and continue indexing.  If an index has gotten
    // corrupted (which happens...) this will throw com.alvazan.orm.api.exc.RowNotFoundException
    // and will kill the entire reindex.
    long docsindexed = 0;
    long startTime = System.currentTimeMillis() - 1;
    Collection<SolrInputDocument> solrDocs = new ArrayList<SolrInputDocument>();
    while (tablesCursor.next()) {
      // Periodically clear the ORM session so memory does not grow unbounded.
      if (++i % 200 == 0) NoSql.em().clear();
      KeyValue<SecureTable> kv = tablesCursor.getCurrent();

      try {
        if (kv.getValue() == null) continue;
      } catch (RowNotFoundException rnfe) {
        // FIX: was guarded by isInfoEnabled() while logging at error level.
        if (log.isErrorEnabled())
          log.error(
              "got a corrupt index while reindexing, ignoring the error and continuing with indexing of other data.");
        continue;
      }
      SecureTable table = kv.getValue();
      DboTableMeta meta = table.getTableMeta();

      SearchUtils.indexTable(table, meta, solrDocs);

      if (table.isSearchable()) {
        log.info("found a searchable table " + table.getName() + " indexing it.");
        String sql = "select c from " + table.getTableName() + " as c";
        Collection<SolrInputDocument> tablesolrDocs = new ArrayList<SolrInputDocument>();

        try {
          QueryResult result =
              NoSql.em().getTypedSession().createQueryCursor(sql, SqlPullProcessor.BATCH_SIZE);
          Iterator<List<TypedRow>> cursor = result.getAllViewsIter().iterator();

          while (true) {
            // cursor.hasNext() can throw on a corrupt row; getNext() skips such
            // items and returns null only when the cursor is truly exhausted.
            List<TypedRow> typedRows = getNext(cursor);
            if (typedRows == null) break;
            for (TypedRow prow : typedRows) {
              SearchPosting.addSolrDataDoc(prow, table, tablesolrDocs);
            }
            if (tablesolrDocs.size() > REINDEX_BATCH_SIZE) {
              // FIX: was adding solrDocs.size() (the metadata batch) even though
              // the docs being saved here are tablesolrDocs.
              docsindexed += tablesolrDocs.size();
              printReindexProgress("in a searchable table", docsindexed, startTime);
              SearchPosting.saveSolr("reindex", tablesolrDocs, null);
              tablesolrDocs = new ArrayList<SolrInputDocument>();
            }
          }
          // FIX: count the docs actually saved (tablesolrDocs), not solrDocs.
          docsindexed += tablesolrDocs.size();
          SearchPosting.saveSolr("reindex", tablesolrDocs, null);
        } catch (Exception e) {
          // Best-effort: a corrupt playorm index must not abort the whole reindex.
          System.out.println(
              "got an exception while indexing a searchable table with the query (probably a corrupt index in playorm):");
          System.out.println(sql);
        }
      }
      if (solrDocs.size() > REINDEX_BATCH_SIZE) {
        docsindexed += solrDocs.size();
        printReindexProgress("in metadata", docsindexed, startTime);
        SearchPosting.saveSolr("reindex", solrDocs, "databusmeta");
        solrDocs = new ArrayList<SolrInputDocument>();
      }
    }
    if (solrDocs.size() > 0) {
      docsindexed += solrDocs.size();
      printReindexProgress("during finalization", docsindexed, startTime);
      SearchPosting.saveSolr("reindex", solrDocs, "databusmeta");
      solrDocs = new ArrayList<SolrInputDocument>();
    }
  }

  /**
   * Prints reindex throughput progress to stdout.
   *
   * <p>FIX: the original inline code computed {@code docsindexed / (elapsed / 1000)},
   * which throws ArithmeticException (divide by zero) whenever less than one second
   * has elapsed; the elapsed seconds are clamped to at least 1 here.
   *
   * @param phase human-readable phase description (e.g. "in metadata")
   * @param docsindexed total docs indexed so far
   * @param startTime reindex start, epoch millis
   */
  private static void printReindexProgress(String phase, long docsindexed, long startTime) {
    long elapsedMillis = System.currentTimeMillis() - startTime;
    long elapsedSeconds = Math.max(1, elapsedMillis / 1000);
    System.out.println(
        "hit solr doc batch size "
            + phase
            + ", "
            + docsindexed
            + " docs so far, "
            + elapsedMillis
            + " millis elapsed "
            + (docsindexed / elapsedSeconds)
            + " docs per sec.");
  }
Example #5
0
  /**
   * Seeds the demo group with pretty-looking demo time-series data and
   * aggregations: a PV solar array, building mechanical series, and building
   * heating series. Presumably invoked once at startup to populate a demo
   * environment — TODO confirm against caller.
   */
  public static void createStuff() {

    // Persist the demo user/entity and flush before creating the database.
    NoSql.em().put(DEMO);

    NoSql.em().flush();

    StartupUtils.createDatabase(NoSql.em(), DEMO, GROUPDEMO, StartupDetailed.JUSTIN);

    NoSql.em().flush();

    // --- PV solar array: five series, then one aggregation over them. ---
    // NOTE(review): the argument order appears to be (seriesName, timeBack,
    // timeForward, interval, group, user, randomness, scale, frequency,
    // belowZeroPolicy) — confirm against createSeriesImplWithPrettyDemoData.
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SOLAR1,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        // Negated interval — intent unclear from here; verify this is deliberate.
        -DEMO_DATA_INTERVAL_15MIN,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_VERY_SLIGHT,
        2000,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SOLAR2,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        600,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SOLAR3,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_VERY_SLIGHT,
        500,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SOLAR4,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        600,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SOLAR5,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        1600,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    // Aggregate the five solar series under one aggregation name.
    createAggregation(
        "PVSolarArrayAggregation1",
        GROUPDEMO,
        URL_PREFIX_SPLINE + DEMO_TIME_SOLAR1,
        URL_PREFIX_SPLINE + DEMO_TIME_SOLAR2,
        URL_PREFIX_SPLINE + DEMO_TIME_SOLAR3,
        URL_PREFIX_SPLINE + DEMO_TIME_SOLAR4,
        URL_PREFIX_SPLINE + DEMO_TIME_SOLAR5);

    // --- Building mechanical: six series, then one aggregation over them. ---
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH2,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_15MIN,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        500,
        FREQUENCY_LOW,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH3,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_15MIN,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SOME,
        3000,
        FREQUENCY_LOW,
        MIRROR_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH4,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_15MIN,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SOME,
        1000,
        FREQUENCY_HIGH,
        MIRROR_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH5,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_15MIN,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SOME,
        10000,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH6,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        1600,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESMECH7,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        4000,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createAggregation(
        "BuildingMechanicalAggregation",
        GROUPDEMO,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH2,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH3,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH4,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH5,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH6,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESMECH7);

    // --- Building heating: three series, then one aggregation over them. ---
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESHEAT2,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SLIGHT,
        500,
        FREQUENCY_HIGH,
        DROP_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESHEAT3,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SOME,
        7000,
        FREQUENCY_LOW,
        MIRROR_BELOW_ZERO);
    createSeriesImplWithPrettyDemoData(
        DEMO_TIME_SERIESHEAT4,
        DEMO_DATA_TIME_BACK,
        DEMO_DATA_TIME_FORWARD,
        DEMO_DATA_INTERVAL_HOURLY,
        GROUPDEMO,
        DEMO,
        RANDOMNESS_SOME,
        1000,
        FREQUENCY_HIGH,
        MIRROR_BELOW_ZERO);

    createAggregation(
        "BuildingHeatingAggregation",
        GROUPDEMO,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESHEAT2,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESHEAT3,
        URL_PREFIX_SPLINE + DEMO_TIME_SERIESHEAT4);
  }