예제 #1
0
  /** Loads the URI-to-ID mapping from the configured input file. */
  private void loadInputFile() {

    // Start from a fresh uri -> numeric-id map and let the utility fill it in.
    URI_ID = new TObjectIntHashMap<String>();
    TextFileUtils.loadInputURIs(inputFile, URI_ID, false);
    logger.debug("Input URIs loading. " + URI_ID.size() + " URIs loaded.");
  }
예제 #2
0
  /**
   * Starts RDF triple extraction: submits one {@link QueryExecutor} worker per input URI to a
   * fixed-size thread pool, waits for completion, then writes the metadata and property index
   * files.
   */
  private void run() {

    // Fixed pool size. The original computed availableProcessors() and then unconditionally
    // overwrote it with 4, so the call was dead code; the effective behavior (4 threads) is kept.
    int n_threads = 4;
    logger.debug("Threads number: " + n_threads);

    ExecutorService executor;
    executor = Executors.newFixedThreadPool(n_threads);

    metadata_counter = new SynchronizedCounter();
    properties_counter = new SynchronizedCounter();
    metadata_index = new TObjectIntHashMap<String>();
    props_index = new TObjectIntHashMap<String>();

    logger.info("Risorse da interrogare: " + num_items);

    try {

      TextFileManager textWriter = null;
      if (outputTextFormat) textWriter = new TextFileManager(textFile);

      ItemFileManager fileManager = new ItemFileManager(metadataFile, ItemFileManager.WRITE);

      // Hoisted out of the loop: Trove's keys() allocates a new Object[] on every call,
      // which made the original loop O(n^2) in allocations.
      Object[] uris = URI_ID.keys();

      for (int i = 0; i < num_items; i++) {

        String uri = (String) uris[i];

        Runnable worker;

        if (model == null) {
          // create worker thread - extraction from endpoint
          worker =
              new QueryExecutor(
                  uri,
                  URI_ID.get(uri),
                  props_index,
                  graphURI,
                  endpoint,
                  metadata_counter,
                  properties_counter,
                  metadata_index,
                  textWriter,
                  fileManager,
                  depth);
        } else {
          // create worker thread - extraction from tdb local dataset
          worker =
              new QueryExecutor(
                  uri,
                  URI_ID.get(uri),
                  props_index,
                  graphURI,
                  endpoint,
                  metadata_counter,
                  properties_counter,
                  metadata_index,
                  textWriter,
                  fileManager,
                  depth,
                  model);
        }

        executor.execute(worker);
      }

      // This will make the executor accept no new threads
      // and finish all existing threads in the queue
      executor.shutdown();
      // Wait until all threads are finished
      executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

      // NOTE(review): if an exception is thrown above, textWriter/fileManager are never closed
      // (resource leak). Moving these closes to a finally block (or try-with-resources, if the
      // manager types implement AutoCloseable) would fix it — confirm close() signatures first.
      if (textWriter != null) textWriter.close();

      fileManager.close();

    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
      e.printStackTrace();
    } catch (Exception e) {
      e.printStackTrace();
    }

    // write metadata index file
    TextFileUtils.writeData(metadataFile + "_index", metadata_index);
    // write properties index file
    TextFileUtils.writeData("props_index", props_index);
  }