Example #1
    public Void visitDir(AbstractFile dir) {

      // don't extract . and .. directories
      if (isDotDirectory(dir)) {
        return null;
      }

      dest.mkdir();

      try {
        int numProcessed = 0;
        // recurse on children
        for (Content child : dir.getChildren()) {
          java.io.File childFile = getFsContentDest(child);
          ExtractFscContentVisitor<T, V> childVisitor =
              new ExtractFscContentVisitor<>(childFile, progress, worker, false);
          // If there is a worker, check for cancellation
          if (worker != null && worker.isCancelled()) {
            break;
          }
          // If this is the source directory of an extraction, it has a progress
          // handle and a worker, and drives the progress bar
          if (progress != null && source) {
            progress.progress(child.getName(), numProcessed);
          }
          child.accept(childVisitor);
          numProcessed++;
        }
      } catch (TskException ex) {
        logger.log(Level.SEVERE, "Trouble fetching children to extract.", ex); // NON-NLS
      }

      return null;
    }
Example #2
  /*
   * Generate a scaled image
   */
  private static BufferedImage generateIcon(Content content, int iconSize) {

    // try-with-resources guarantees the stream is closed, even on error
    try (InputStream inputStream = new ReadContentInputStream(content)) {
      BufferedImage bi = ImageIO.read(inputStream);
      if (bi == null) {
        logger.log(Level.WARNING, "No image reader for file: " + content.getName()); // NON-NLS
        return null;
      }
      return ScalrWrapper.resizeFast(bi, iconSize);
    } catch (OutOfMemoryError e) {
      logger.log(
          Level.WARNING, "Could not scale image (too large): " + content.getName(), e); // NON-NLS
      return null;
    } catch (Exception e) {
      logger.log(Level.WARNING, "Could not scale image: " + content.getName(), e); // NON-NLS
      return null;
    }
  }
Example #3
  /**
   * Aggregate all the matches from visiting the children Content objects of a parent Content
   * object.
   *
   * @param parent the parent content object whose children are visited
   * @return all files aggregated from visiting the parent's children
   */
  protected Collection<AbstractFile> getAllFromChildren(Content parent) {
    Collection<AbstractFile> all = new ArrayList<>();

    try {
      for (Content child : parent.getChildren()) {
        all.addAll(child.accept(this));
      }
    } catch (TskException ex) {
      logger.log(Level.SEVERE, "Error getting Content children", ex); // NON-NLS
    }

    return all;
  }
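
As a rough sketch of how subclass visit methods typically delegate to this helper (assuming the enclosing visitor class exposes getAllFromChildren as shown, and that Directory and VolumeSystem visit overloads exist in the visitor base; both overrides below are illustrative, not part of the original source):

  // Hypothetical visitor overrides: container types aggregate their children
  @Override
  public Collection<AbstractFile> visit(Directory dir) {
    // everything a directory contributes comes from its children
    return getAllFromChildren(dir);
  }

  @Override
  public Collection<AbstractFile> visit(VolumeSystem vs) {
    // a volume system likewise contributes the files found under its volumes
    return getAllFromChildren(vs);
  }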
Example #4
 /**
  * Gets the time zone(s) of the image(s) in this case.
  *
  * @return the set of time zones of the images in this case
  */
 public Set<TimeZone> getTimeZone() {
   Set<TimeZone> timezones = new HashSet<TimeZone>();
   for (Content c : getRootObjects()) {
     try {
       final Image image = c.getImage();
       if (image != null) {
         timezones.add(TimeZone.getTimeZone(image.getTimeZone()));
       }
     } catch (TskException ex) {
       logger.log(Level.INFO, "Error getting time zones", ex);
     }
   }
   return timezones;
 }
Example #5
  @Override
  public void setNode(Node selectedNode) {
    if (selectedNode != null) {
      Lookup lookup = selectedNode.getLookup();
      Content content = lookup.lookup(Content.class);
      if (content != null) {
        try {
          this.setDataView(content.getAllArtifacts(), 1);
        } catch (TskException ex) {
          logger.log(Level.WARNING, "Couldn't get artifacts: ", ex);
        }
        return;
      }
    }

    this.setDataView(new ArrayList<BlackboardArtifact>(), 1);
  }
Example #6
 private static Image generateAndSaveIcon(Content content, int iconSize) {
   Image icon = null;
   try {
     icon = generateIcon(content, iconSize);
     if (icon == null) {
       return DEFAULT_ICON;
     } else {
       File f = getFile(content.getId());
       if (f.exists()) {
         f.delete();
       }
        ImageIO.write((BufferedImage) icon, "png", f); // NON-NLS
     }
   } catch (IOException ex) {
     logger.log(Level.WARNING, "Could not write cache thumbnail: " + content, ex); // NON-NLS
   }
   return icon;
 }
Example #7
  /**
   * Check if Solr has extracted content for a given node
   *
   * @param node the node to check for indexed text
   * @return true if Solr has content, else false
   */
  private boolean solrHasContent(Node node) {
    Content content = node.getLookup().lookup(Content.class);
    if (content == null) {
      return false;
    }

    final Server solrServer = KeywordSearch.getServer();

    final long contentID = content.getId();

    try {
      return solrServer.queryIsIndexed(contentID);
    } catch (NoOpenCoreException | SolrServerException ex) {
      logger.log(Level.WARNING, "Couldn't determine whether content is indexed.", ex);
      return false;
    }
  }
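
A caller would typically use this check to decide whether an extracted-text view can be shown at all; this is a minimal sketch, and isSupported is an illustrative method name:

  // Hypothetical guard: only enable the extracted-content viewer when Solr has the text
  public boolean isSupported(Node node) {
    return node != null && solrHasContent(node);
  }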
Example #8
  /**
   * Reads all the data from any content object and writes (extracts) it to a file.
   *
   * @param content Any content object.
   * @param outputFile Will be created if it doesn't exist, and overwritten if it does
   * @param progress progress bar handle to update, if available. null otherwise
   * @param worker the swing worker background thread the process runs within, or null, if in the
   *     main thread, used to handle task cancellation
   * @param source true if source file
   * @return number of bytes extracted
   * @throws IOException if file could not be written
   */
  public static <T> long writeToFile(
      Content content,
      java.io.File outputFile,
      ProgressHandle progress,
      Future<T> worker,
      boolean source)
      throws IOException {
    InputStream in = new ReadContentInputStream(content);

    // Get the unit size for the progress bar; guard against zero for files under 100 bytes
    int unit = (int) Math.max(1, content.getSize() / 100);
    long totalRead = 0;

    try (FileOutputStream out = new FileOutputStream(outputFile, false)) {
      byte[] buffer = new byte[TO_FILE_BUFFER_SIZE];
      int len = in.read(buffer);
      while (len != -1) {
        // If there is a worker, check for a cancellation
        if (worker != null && worker.isCancelled()) {
          break;
        }
        out.write(buffer, 0, len);
        // Count the bytes just written, not the next read (which is -1 at EOF)
        totalRead += len;
        // If there is a progress bar and this is the source file,
        // report any progress
        if (progress != null && source && totalRead >= TO_FILE_BUFFER_SIZE) {
          int totalProgress = (int) (totalRead / unit);
          progress.progress(content.getName(), totalProgress);
          // If it's not the source, just update the file being processed
        } else if (progress != null && !source) {
          progress.progress(content.getName());
        }
        len = in.read(buffer);
      }
    } finally {
      in.close();
    }
    return totalRead;
  }
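
A minimal usage sketch, assuming the method lives on ContentUtils (as the resource key in Example #19 suggests) and that no progress handle or worker is needed; tempDir is an illustrative variable:

  // Hypothetical one-shot extraction with no progress reporting or cancellation support
  java.io.File outputFile = new java.io.File(tempDir, content.getName()); // tempDir is assumed
  try {
    long bytesWritten = ContentUtils.writeToFile(content, outputFile, null, null, false);
    logger.log(Level.INFO, "Extracted " + bytesWritten + " bytes to " + outputFile.getAbsolutePath());
  } catch (IOException ex) {
    logger.log(Level.SEVERE, "Could not extract " + content.getName(), ex);
  }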
Example #9
  public static TimeZone getTimeZone(Content c) {

    try {
      if (!shouldDisplayTimesInLocalTime()) {
        return TimeZone.getTimeZone("GMT");
      } else {
        final Content dataSource = c.getDataSource();
        if (dataSource instanceof Image) { // instanceof already rejects null
          Image image = (Image) dataSource;
          return TimeZone.getTimeZone(image.getTimeZone());
        } else {
          // case such as top level VirtualDirectory
          return TimeZone.getDefault();
        }
      }
    } catch (TskException ex) {
      return TimeZone.getDefault();
    }
  }
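
One way a caller might apply the returned zone when rendering a file's epoch-seconds timestamp; the format pattern and the epochSeconds variable are illustrative assumptions:

  // Hypothetical rendering of a timestamp in the content's time zone
  SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
  formatter.setTimeZone(getTimeZone(content));
  String display = formatter.format(new Date(epochSeconds * 1000L)); // seconds -> milliseconds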
Example #10
 /**
  * Get an icon of a specified size.
  *
  * @param content the content to generate an icon for
  * @param iconSize the desired icon width in pixels
  * @return the cached or newly generated icon, or DEFAULT_ICON if none could be generated
  */
 public static Image getIcon(Content content, int iconSize) {
   Image icon;
   // If a thumbnail file is already saved locally
   File file = getFile(content.getId());
   if (file.exists()) {
     try {
       BufferedImage bicon = ImageIO.read(file);
       if (bicon == null) {
         icon = DEFAULT_ICON;
       } else if (bicon.getWidth() != iconSize) {
         icon = generateAndSaveIcon(content, iconSize);
       } else {
         icon = bicon;
       }
     } catch (IOException ex) {
       logger.log(Level.WARNING, "Error while reading image.", ex); // NON-NLS
       icon = DEFAULT_ICON;
     }
   } else { // Make a new icon
     icon = generateAndSaveIcon(content, iconSize);
   }
   return icon;
 }
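
A hedged usage sketch: in the code above getIcon always falls back to DEFAULT_ICON rather than returning null, so the result can be used directly; the 100-pixel size and the Swing label are illustrative:

  // Hypothetical thumbnail display in a Swing component
  Image thumbnail = getIcon(content, 100); // 100 px is an illustrative size
  thumbnailLabel.setIcon(new ImageIcon(thumbnail));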
Example #11
 /**
  * Get the cached file of the icon. Generates the icon and its file if it doesn't already exist,
  * so this method is guaranteed to return a file that exists.
  *
  * @param content the content whose icon file is wanted
  * @param iconSize the desired icon width in pixels
  * @return the cached icon file, or null if no icon could be produced
  */
 public static File getIconFile(Content content, int iconSize) {
   if (getIcon(content, iconSize) != null) {
     return getFile(content.getId());
   }
   return null;
 }
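
Because the file is generated on demand, a caller only needs a null check before handing it off; openWithExternalViewer is an illustrative helper name, not part of the original source:

  // Hypothetical handoff of the cached thumbnail file to an external consumer
  File iconFile = getIconFile(content, 100); // 100 px is an illustrative size
  if (iconFile != null) {
    openWithExternalViewer(iconFile);
  }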
Example #12
 /**
  * IngestManager entry point, enqueues data to be processed and starts new ingest as needed, or
  * just enqueues data to an existing pipeline.
  *
  * <p>Spawns a background thread that enumerates all sorted files and executes the chosen modules
  * per file in a predetermined order. Notifies modules when work is complete or should be
  * interrupted using complete() and stop() calls. Does not block and can be called multiple times
  * to enqueue more work for an already running background ingest process.
  *
  * @param modules modules to execute on the data source input
  * @param input input data source Content objects to execute the ingest modules on
  */
 public void execute(final List<IngestModuleAbstract> modules, final Content input) {
   List<Content> inputs = new ArrayList<Content>();
   inputs.add(input);
   logger.log(Level.INFO, "Will enqueue input: " + input.getName());
   execute(modules, inputs);
 }
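
As the body above shows, the single-input overload simply wraps the input in a list before delegating; ingestManager, selectedModules, and dataSource are illustrative names:

  // Equivalent call through the list-based overload
  List<Content> inputs = new ArrayList<Content>();
  inputs.add(dataSource);
  ingestManager.execute(selectedModules, inputs);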
Example #13
    private void queueAll(List<IngestModuleAbstract> modules, final List<Content> inputs) {

      int processed = 0;
      for (Content input : inputs) {
        final String inputName = input.getName();

        final List<IngestModuleDataSource> dataSourceMods = new ArrayList<IngestModuleDataSource>();
        final List<IngestModuleAbstractFile> fileMods = new ArrayList<IngestModuleAbstractFile>();

        for (IngestModuleAbstract module : modules) {
          if (isCancelled()) {
            logger.log(Level.INFO, "Terminating ingest queueing due to cancellation.");
            return;
          }

          final String moduleName = module.getName();
          progress.progress(moduleName + " " + inputName, processed);

          switch (module.getType()) {
            case DataSource:
              final IngestModuleDataSource newModuleInstance =
                  (IngestModuleDataSource) moduleLoader.getNewIngestModuleInstance(module);
              if (newModuleInstance != null) {
                dataSourceMods.add(newModuleInstance);
              } else {
                logger.log(
                    Level.INFO,
                    "Error loading module and adding input "
                        + inputName
                        + " with module "
                        + module.getName());
              }
              break;

            case AbstractFile:
              // enqueue the same singleton AbstractFile module
              logger.log(
                  Level.INFO,
                  "Adding input " + inputName + " for AbstractFileModule " + module.getName());

              fileMods.add((IngestModuleAbstractFile) module);
              break;

            default:
              logger.log(Level.SEVERE, "Unexpected module type: " + module.getType().name());
          }
        } // for modules

        // queue to schedulers

        // queue to datasource-level ingest pipeline(s)
        final boolean processUnalloc = getProcessUnallocSpace();
        final ScheduledTask<IngestModuleDataSource> dataSourceTask =
            new ScheduledTask<IngestModuleDataSource>(input, dataSourceMods);
        final PipelineContext<IngestModuleDataSource> dataSourcePipelineContext =
            new PipelineContext<IngestModuleDataSource>(dataSourceTask, processUnalloc);
        logger.log(Level.INFO, "Queing data source ingest task: " + dataSourceTask);
        progress.progress("DataSource Ingest" + " " + inputName, processed);
        final IngestScheduler.DataSourceScheduler dataSourceScheduler =
            scheduler.getDataSourceScheduler();
        dataSourceScheduler.schedule(dataSourcePipelineContext);
        progress.progress("DataSource Ingest" + " " + inputName, ++processed);

        // queue to file-level ingest pipeline
        final ScheduledTask<IngestModuleAbstractFile> fTask =
            new ScheduledTask<IngestModuleAbstractFile>(input, fileMods);
        final PipelineContext<IngestModuleAbstractFile> filePipelineContext =
            new PipelineContext<IngestModuleAbstractFile>(fTask, processUnalloc);
        logger.log(Level.INFO, "Queuing file ingest task: " + fTask);
        progress.progress("File Ingest" + " " + inputName, processed);
        final IngestScheduler.FileScheduler fileScheduler = scheduler.getFileScheduler();
        fileScheduler.schedule(filePipelineContext);
        progress.progress("File Ingest" + " " + inputName, ++processed);
      } // for data sources

      // logger.log(Level.INFO, AbstractFileQueue.printQueue());
    }
Example #14
  @Override
  public void setNode(final Node selectedNode) {
    // TODO: figure out why setNode() is called twice for the same node

    // to clear the viewer
    if (selectedNode == null) {
      currentNode = null;
      resetComponent();
      return;
    }

    this.currentNode = selectedNode;

    // sources are custom markup from the node (if available) and default
    // markup is fetched from solr
    List<MarkupSource> sources = new ArrayList<MarkupSource>();

    // add additional registered sources for this node
    sources.addAll(selectedNode.getLookup().lookupAll(MarkupSource.class));

    if (solrHasContent(selectedNode)) {
      Content content = selectedNode.getLookup().lookup(Content.class);
      if (content == null) {
        return;
      }

      // add to page tracking if not there yet
      final long contentID = content.getId();

      MarkupSource newSource =
          new MarkupSource() {

            private boolean inited = false;
            private int numPages = 0;
            private int currentPage = 0;
            private boolean hasChunks = false;

            @Override
            public int getCurrentPage() {
              return this.currentPage;
            }

            @Override
            public boolean hasNextPage() {
              return currentPage < numPages;
            }

            @Override
            public boolean hasPreviousPage() {
              return currentPage > 1;
            }

            @Override
            public int nextPage() {
              if (!hasNextPage()) {
                throw new IllegalStateException("No next page.");
              }
              ++currentPage;
              return currentPage;
            }

            @Override
            public int previousPage() {
              if (!hasPreviousPage()) {
                throw new IllegalStateException("No previous page.");
              }
              --currentPage;
              return currentPage;
            }

            @Override
            public boolean hasNextItem() {
              throw new UnsupportedOperationException("Not supported, not a searchable source.");
            }

            @Override
            public boolean hasPreviousItem() {
              throw new UnsupportedOperationException("Not supported, not a searchable source.");
            }

            @Override
            public int nextItem() {
              throw new UnsupportedOperationException("Not supported, not a searchable source.");
            }

            @Override
            public int previousItem() {
              throw new UnsupportedOperationException("Not supported, not a searchable source.");
            }

            @Override
            public int currentItem() {
              throw new UnsupportedOperationException("Not supported, not a searchable source.");
            }

            @Override
            public String getMarkup() {
              try {
                // HTML-escape the extracted text before wrapping it in <pre>
                String markup =
                    StringEscapeUtils.escapeHtml(
                        getSolrContent(selectedNode, currentPage, hasChunks));
                return "<pre>" + markup.trim() + "</pre>";
              } catch (SolrServerException ex) {
                logger.log(Level.WARNING, "Couldn't get extracted content.", ex);
                return "";
              }
            }

            @Override
            public String toString() {
              return "Extracted Content";
            }

            @Override
            public boolean isSearchable() {
              return false;
            }

            @Override
            public String getAnchorPrefix() {
              return "";
            }

            @Override
            public int getNumberHits() {
              return 0;
            }

            @Override
            public LinkedHashMap<Integer, Integer> getHitsPages() {
              return null;
            }

            @Override
            public int getNumberPages() {
              if (inited) {
                return this.numPages;
              }

              final Server solrServer = KeywordSearch.getServer();

              try {
                numPages = solrServer.queryNumFileChunks(contentID);
                if (numPages == 0) {
                  numPages = 1;
                  hasChunks = false;
                } else {
                  hasChunks = true;
                }
                inited = true;
              } catch (SolrServerException | NoOpenCoreException ex) {
                logger.log(Level.WARNING, "Could not get number of chunks: ", ex);
              }
              return numPages;
            }
          };

      currentSource = newSource;
      sources.add(newSource);

      // init pages; the first getNumberPages() call also initializes the source's page count
      currentSource.getNumberPages();
      int currentPage = currentSource.getCurrentPage();
      if (currentPage == 0 && currentSource.hasNextPage()) {
        currentSource.nextPage();
      }

      updatePageControls();
    }

    // first source will be the default displayed
    setPanel(sources);
    // If node has been selected before, return to the previous position
    scrollToCurrentHit();
  }
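
Using only the MarkupSource methods shown above, a consumer could walk the pages like this (markupSource and render are illustrative names):

  // Hypothetical page walk over a MarkupSource
  markupSource.getNumberPages(); // the first call also initializes the paging state
  while (markupSource.hasNextPage()) {
    markupSource.nextPage();
    render(markupSource.getMarkup()); // render() is an illustrative helper
  }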
Example #15
 private java.io.File getFsContentDest(Content fsc) {
   String path = dest.getAbsolutePath() + java.io.File.separator + fsc.getName();
   return new java.io.File(path);
 }
Example #16
 /** Convenience method that creates a visitor for the given destination and extracts the given content. */
 public static <T, V> void extract(
     Content content, java.io.File dest, ProgressHandle progress, SwingWorker<T, V> worker) {
   content.accept(new ExtractFscContentVisitor<>(dest, progress, worker, true));
 }
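
A minimal sketch of invoking this convenience method from inside a SwingWorker subclass, so that "this" supplies the cancellation checks seen in Example #1; destDir and progressHandle are illustrative names:

  // Hypothetical call from a SwingWorker's doInBackground()
  ExtractFscContentVisitor.extract(content, destDir, progressHandle, this);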
Example #17
 @Override
 protected String defaultVisit(Content content) {
   // string concatenation converts the long id, so Long.toString is unnecessary
   return content.getName() + ":" + content.getId();
 }
Example #18
 public static String getSystemName(Content content) {
   return content.accept(systemName);
 }
Example #19
 @Override
 protected Void defaultVisit(Content content) {
   throw new UnsupportedOperationException(
       NbBundle.getMessage(
           this.getClass(), "ContentUtils.exception.msg", content.getClass().getSimpleName()));
 }
Example #20
  /**
   * Create the flat result keys for a keyword query request.
   *
   * @param queryRequest the query request holding the keyword search query to run
   * @param toPopulate the list to populate with one entry per unique content hit
   * @return true if the query was performed, false if it was invalid or could not be run
   */
  private boolean createFlatKeys(QueryRequest queryRequest, List<KeyValueQueryContent> toPopulate) {
    /** Check the validity of the requested query. */
    final KeywordSearchQuery keywordSearchQuery = queryRequest.getQuery();
    if (!keywordSearchQuery.validate()) {
      // TODO mark the particular query node RED
      return false;
    }

    /** Execute the requested query. */
    QueryResults queryResults;
    try {
      queryResults = keywordSearchQuery.performQuery();
    } catch (NoOpenCoreException ex) {
      logger.log(
          Level.SEVERE,
          "Could not perform the query " + keywordSearchQuery.getQueryString(),
          ex); // NON-NLS
      return false;
    }

    int id = 0;
    List<KeyValueQueryContent> tempList = new ArrayList<>();
    for (KeywordHit hit : getOneHitPerObject(queryResults)) {
      /** Get file properties. */
      Map<String, Object> properties = new LinkedHashMap<>();
      Content content = hit.getContent();
      if (content instanceof AbstractFile) {
        AbstractFsContentNode.fillPropertyMap(properties, (AbstractFile) content);
      } else {
        properties.put(
            AbstractAbstractFileNode.AbstractFilePropertyType.LOCATION.toString(),
            content.getName());
      }

      /** Add a snippet property, if available. */
      if (hit.hasSnippet()) {
        setCommonProperty(properties, CommonPropertyTypes.CONTEXT, hit.getSnippet());
      }

      // @@@ USE ContentHit in UniqueFileMap instead of the below search
      // get unique match result files
      // BC: @@@ This is really inefficient. We should keep track of this when
      // we flatten the list of files to the unique files.
      final String highlightQueryEscaped =
          getHighlightQuery(
              keywordSearchQuery, keywordSearchQuery.isLiteral(), queryResults, content);

      String name = content.getName();
      if (hit.isArtifactHit()) {
        name = hit.getArtifact().getDisplayName() + " Artifact"; // NON-NLS
      }

      tempList.add(
          new KeyValueQueryContent(
              name,
              properties,
              ++id,
              hit.getSolrObjectId(),
              content,
              highlightQueryEscaped,
              keywordSearchQuery,
              queryResults));
    }

    // Add all the nodes to toPopulate at once. Minimizes node creation
    // on the EDT thread, which can slow and/or hang the UI on large queries.
    toPopulate.addAll(tempList);

    // write to the blackboard
    // The snippet cannot be reused in BlackboardResultWriter: for regex searches the UI
    // compresses results to one entry per content per regex (even with multiple term hits),
    // whereas the blackboard writes every hit per content separately.
    new BlackboardResultWriter(queryResults, queryRequest.getQuery().getKeywordList().getName())
        .execute();

    return true;
  }