/**
 * Generates a scaled image (thumbnail) of the given content.
 *
 * @param content the content to read image data from
 * @param iconSize the target size to scale to, in pixels
 * @return the scaled image, or null if the content could not be read or scaled
 */
private static BufferedImage generateIcon(Content content, int iconSize) {
    InputStream inputStream = null;
    try {
        inputStream = new ReadContentInputStream(content);
        BufferedImage bi = ImageIO.read(inputStream);
        if (bi == null) {
            logger.log(Level.WARNING, "No image reader for file: " + content.getName()); // NON-NLS
            return null;
        }
        return ScalrWrapper.resizeFast(bi, iconSize);
    } catch (OutOfMemoryError e) {
        logger.log(Level.WARNING, "Could not scale image (too large): " + content.getName(), e); // NON-NLS
        return null;
    } catch (Exception e) {
        logger.log(Level.WARNING, "Could not scale image: " + content.getName(), e); // NON-NLS
        return null;
    } finally {
        if (inputStream != null) {
            try {
                inputStream.close();
            } catch (IOException ex) {
                logger.log(Level.WARNING,
                        "Could not close input stream after resizing thumbnail: " + content.getName(),
                        ex); // NON-NLS
            }
        }
    }
}
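/*
 * Usage sketch (illustrative, not part of the original class): generateIcon()
 * returns null on failure, so a caller would typically fall back to a default
 * image. The 100-pixel size and the DEFAULT_ICON placeholder below are
 * assumptions for this example.
 */
private static final BufferedImage DEFAULT_ICON =
        new BufferedImage(100, 100, BufferedImage.TYPE_INT_ARGB); // assumed blank placeholder

private static BufferedImage getThumbnail(Content content) {
    BufferedImage icon = generateIcon(content, 100); // assumed thumbnail size
    return (icon != null) ? icon : DEFAULT_ICON;
}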
public Void visitDir(AbstractFile dir) {
    // don't extract . and .. directories
    if (isDotDirectory(dir)) {
        return null;
    }

    // ensure the destination directory exists (a no-op if it already does)
    dest.mkdir();

    try {
        int numProcessed = 0;
        // recurse on children
        for (Content child : dir.getChildren()) {
            java.io.File childFile = getFsContentDest(child);
            ExtractFscContentVisitor<T, V> childVisitor =
                    new ExtractFscContentVisitor<>(childFile, progress, worker, false);
            // If this is the source directory of an extraction, it will have a
            // progress handle and worker and will drive the progress bar
            if (worker != null && worker.isCancelled()) {
                break;
            }
            if (progress != null && source) {
                progress.progress(child.getName(), numProcessed);
            }
            child.accept(childVisitor);
            numProcessed++;
        }
    } catch (TskException ex) {
        logger.log(Level.SEVERE, "Trouble fetching children to extract.", ex); // NON-NLS
    }

    return null;
}
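/*
 * Usage sketch (illustrative, not from the original source): an extraction
 * would start by accepting a visitor on the top-level directory with
 * source == true so that it drives the progress bar. The SwingWorker type
 * (assumes javax.swing.SwingWorker is imported) and this helper are
 * assumptions; the real constructor may differ.
 */
private static <T> void extractDirectory(AbstractFile topDir, java.io.File destDir,
        ProgressHandle progressHandle, SwingWorker<T, Void> extractWorker) {
    // source == true: this visitor reports progress for each child it extracts
    topDir.accept(new ExtractFscContentVisitor<>(destDir, progressHandle, extractWorker, true));
}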
/**
 * Reads all the data from any content object and writes (extracts) it to a file.
 *
 * @param content any content object
 * @param outputFile will be created if it doesn't exist, and overwritten if it does
 * @param progress progress bar handle to update, if available; null otherwise
 * @param worker the swing worker background thread the process runs within, or null if in the
 *     main thread; used to handle task cancellation
 * @param source true if content is the source (top-level) object of the extraction
 * @return number of bytes extracted
 * @throws IOException if the file could not be written
 */
public static <T> long writeToFile(Content content, java.io.File outputFile,
        ProgressHandle progress, Future<T> worker, boolean source) throws IOException {

    // Get the unit size for the progress bar; guard against a zero unit for
    // content smaller than 100 bytes to avoid division by zero below
    int unit = (int) (content.getSize() / 100);
    if (unit == 0) {
        unit = 1;
    }
    long totalRead = 0;

    try (InputStream in = new ReadContentInputStream(content);
            FileOutputStream out = new FileOutputStream(outputFile, false)) {
        byte[] buffer = new byte[TO_FILE_BUFFER_SIZE];
        int len = in.read(buffer);
        while (len != -1) {
            // If there is a worker, check for a cancellation
            if (worker != null && worker.isCancelled()) {
                break;
            }
            out.write(buffer, 0, len);
            // Count the bytes just written; tallying after the next read would
            // skip the first chunk and add the final -1 returned at EOF
            totalRead += len;
            len = in.read(buffer);

            // If there is a progress bar and this is the source file,
            // report any progress
            if (progress != null && source && totalRead >= TO_FILE_BUFFER_SIZE) {
                int totalProgress = (int) (totalRead / unit);
                progress.progress(content.getName(), totalProgress);
            // If it's not the source, just update the file being processed
            } else if (progress != null && !source) {
                progress.progress(content.getName());
            }
        }
    }
    return totalRead;
}
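/*
 * Usage sketch (illustrative, not part of the original class): extracts a
 * content object to a temporary file with no progress handle and no worker,
 * i.e. no UI updates and no cancellation checks. The destination path is an
 * assumption for the example.
 */
private static long extractToTemp(Content content) throws IOException {
    java.io.File outFile =
            new java.io.File(System.getProperty("java.io.tmpdir"), content.getName());
    return writeToFile(content, outFile, null, null, false);
}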
/**
 * IngestManager entry point; enqueues data to be processed and starts a new ingest as needed,
 * or just enqueues the data to an already existing pipeline.
 *
 * <p>Spawns a background thread which enumerates all sorted files and executes the chosen
 * modules per file in a pre-determined order. Notifies modules when work is complete or should
 * be interrupted using complete() and stop() calls. Does not block and can be called multiple
 * times to enqueue more work to the already running background ingest process.
 *
 * @param modules modules to execute on the data source input
 * @param input input data source Content object to execute the ingest modules on
 */
public void execute(final List<IngestModuleAbstract> modules, final Content input) {
    List<Content> inputs = new ArrayList<Content>();
    inputs.add(input);
    logger.log(Level.INFO, "Will enqueue input: " + input.getName());
    execute(modules, inputs);
}
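/*
 * Usage sketch (illustrative, not part of the original class): because
 * execute() does not block, several data sources can be enqueued back to back
 * and are all picked up by the same running background ingest process.
 */
private void enqueueAllSources(List<IngestModuleAbstract> modules, List<Content> dataSources) {
    for (Content dataSource : dataSources) {
        execute(modules, dataSource); // each call only adds work to the queue
    }
}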
private void queueAll(List<IngestModuleAbstract> modules, final List<Content> inputs) {
    int processed = 0;
    for (Content input : inputs) {
        final String inputName = input.getName();

        final List<IngestModuleDataSource> dataSourceMods = new ArrayList<IngestModuleDataSource>();
        final List<IngestModuleAbstractFile> fileMods = new ArrayList<IngestModuleAbstractFile>();

        for (IngestModuleAbstract module : modules) {
            if (isCancelled()) {
                logger.log(Level.INFO, "Terminating ingest queueing due to cancellation.");
                return;
            }

            final String moduleName = module.getName();
            progress.progress(moduleName + " " + inputName, processed);

            switch (module.getType()) {
                case DataSource:
                    // data source modules get a fresh instance per task
                    final IngestModuleDataSource newModuleInstance =
                            (IngestModuleDataSource) moduleLoader.getNewIngestModuleInstance(module);
                    if (newModuleInstance != null) {
                        dataSourceMods.add(newModuleInstance);
                    } else {
                        logger.log(Level.INFO, "Could not load module " + module.getName()
                                + " for input " + inputName);
                    }
                    break;

                case AbstractFile:
                    // enqueue the same singleton AbstractFile module
                    logger.log(Level.INFO, "Adding input " + inputName
                            + " for AbstractFileModule " + module.getName());
                    fileMods.add((IngestModuleAbstractFile) module);
                    break;

                default:
                    logger.log(Level.SEVERE, "Unexpected module type: " + module.getType().name());
            }
        } // for modules

        /* Queue to the schedulers. */

        // queue to datasource-level ingest pipeline(s)
        final boolean processUnalloc = getProcessUnallocSpace();
        final ScheduledTask<IngestModuleDataSource> dataSourceTask =
                new ScheduledTask<IngestModuleDataSource>(input, dataSourceMods);
        final PipelineContext<IngestModuleDataSource> dataSourcePipelineContext =
                new PipelineContext<IngestModuleDataSource>(dataSourceTask, processUnalloc);
        logger.log(Level.INFO, "Queuing data source ingest task: " + dataSourceTask);
        progress.progress("DataSource Ingest" + " " + inputName, processed);
        final IngestScheduler.DataSourceScheduler dataSourceScheduler =
                scheduler.getDataSourceScheduler();
        dataSourceScheduler.schedule(dataSourcePipelineContext);
        progress.progress("DataSource Ingest" + " " + inputName, ++processed);

        // queue to file-level ingest pipeline
        final ScheduledTask<IngestModuleAbstractFile> fileTask =
                new ScheduledTask<IngestModuleAbstractFile>(input, fileMods);
        final PipelineContext<IngestModuleAbstractFile> filePipelineContext =
                new PipelineContext<IngestModuleAbstractFile>(fileTask, processUnalloc);
        logger.log(Level.INFO, "Queuing file ingest task: " + fileTask);
        progress.progress("File Ingest" + " " + inputName, processed);
        final IngestScheduler.FileScheduler fileScheduler = scheduler.getFileScheduler();
        fileScheduler.schedule(filePipelineContext);
        progress.progress("File Ingest" + " " + inputName, ++processed);
    } // for data sources
}
private java.io.File getFsContentDest(Content fsc) {
    String path = dest.getAbsolutePath() + java.io.File.separator + fsc.getName();
    return new java.io.File(path);
}
@Override
protected String defaultVisit(Content content) {
    return content.getName() + ":" + Long.toString(content.getId());
}
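/*
 * Illustrative note (values assumed, not from the original source): for a
 * content object named "report.docx" with object id 42, defaultVisit()
 * returns "report.docx:42".
 */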
/**
 * @param queryRequest the query request to evaluate
 * @param toPopulate list that receives one key/value entry per unique hit
 * @return false if the query is invalid or could not be executed, true otherwise
 */
private boolean createFlatKeys(QueryRequest queryRequest, List<KeyValueQueryContent> toPopulate) {
    /* Check the validity of the requested query. */
    final KeywordSearchQuery keywordSearchQuery = queryRequest.getQuery();
    if (!keywordSearchQuery.validate()) {
        // TODO mark the particular query node RED
        return false;
    }

    /* Execute the requested query. */
    QueryResults queryResults;
    try {
        queryResults = keywordSearchQuery.performQuery();
    } catch (NoOpenCoreException ex) {
        logger.log(Level.SEVERE,
                "Could not perform the query " + keywordSearchQuery.getQueryString(), ex); // NON-NLS
        return false;
    }

    int id = 0;
    List<KeyValueQueryContent> tempList = new ArrayList<>();
    for (KeywordHit hit : getOneHitPerObject(queryResults)) {
        /* Get file properties. */
        Map<String, Object> properties = new LinkedHashMap<>();
        Content content = hit.getContent();
        if (content instanceof AbstractFile) {
            AbstractFsContentNode.fillPropertyMap(properties, (AbstractFile) content);
        } else {
            properties.put(AbstractAbstractFileNode.AbstractFilePropertyType.LOCATION.toString(),
                    content.getName());
        }

        /* Add a snippet property, if available. */
        if (hit.hasSnippet()) {
            setCommonProperty(properties, CommonPropertyTypes.CONTEXT, hit.getSnippet());
        }

        // @@@ USE ContentHit in UniqueFileMap instead of the search below to
        // get the unique match result files
        // BC: @@@ This is really inefficient. We should keep track of this when
        // we flatten the list of files to the unique files.
        final String highlightQueryEscaped = getHighlightQuery(keywordSearchQuery,
                keywordSearchQuery.isLiteral(), queryResults, content);

        String name = content.getName();
        if (hit.isArtifactHit()) {
            name = hit.getArtifact().getDisplayName() + " Artifact"; // NON-NLS
        }

        tempList.add(new KeyValueQueryContent(name, properties, ++id, hit.getSolrObjectId(),
                content, highlightQueryEscaped, keywordSearchQuery, queryResults));
    }

    // Add all the nodes to toPopulate at once. Minimizes node creation on the
    // EDT, which can slow and/or hang the UI on large queries.
    toPopulate.addAll(tempList);

    // Write to the blackboard. The snippet from BlackboardResultWriter cannot
    // be reused here: for regex searches the UI compresses results to one per
    // content per regex (even with multiple term hits), whereas the blackboard
    // records every hit per content separately.
    new BlackboardResultWriter(queryResults, queryRequest.getQuery().getKeywordList().getName())
            .execute();

    return true;
}