private static int detectVersion() {
   Log log = LogFactory.getLog(LuceneVersionDetector.class, Log.class);
   int version = 3;
   try {
      Class.forName("org.apache.lucene.store.IOContext", true, LuceneVersionDetector.class.getClassLoader());
      version = 4;
   } catch (ClassNotFoundException e) {
      // IOContext only exists since Lucene 4: its absence means we're running Lucene 3
   }
   log.detectedLuceneVersion(version);
   return version;
}
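// A hedged sketch of how the detected version could drive a capability check;
// the field and method below are illustrative additions, not part of the
// original class.
private static final int LUCENE_VERSION = detectVersion();

static boolean supportsIOContext() {
   // IOContext-aware code paths are only safe on Lucene 4 and later
   return LUCENE_VERSION >= 4;
}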
/**
 * @param key {@link org.infinispan.lucene.IndexScopedKey}
 * @return true if the key matches a loadable entry
 */
protected boolean containsKey(final IndexScopedKey key) {
   try {
      return key.accept(containsKeyVisitor);
   } catch (Exception e) {
      throw log.exceptionInCacheLoader(e);
   }
}
/**
 * Load the value for a specific key.
 */
protected Object load(final IndexScopedKey key) {
   try {
      return key.accept(loadVisitor);
   } catch (Exception e) {
      throw log.exceptionInCacheLoader(e);
   }
}
/**
 * Closes the underlying Directory. After it's closed, no further invocations are expected on
 * this adaptor; we don't check for that explicitly as the Directory instance takes care of it.
 */
protected void close() {
   try {
      directory.close();
   } catch (IOException e) {
      // log but continue execution: we might want to try closing more instances
      log.errorOnFSDirectoryClose(e);
   }
}
/**
 * Index segment files might be larger than 2GB, so they can never fit in a single byte array
 * (which would be overkill anyway): they are split into chunks. The number of chunks must fit
 * in an int, so if the configured chunkSize is too small for the file length we ramp up and
 * retry the split with larger chunkSize values.
 */
public static int figureChunksNumber(final String fileName, final long fileLength, int chunkSize) {
   if (chunkSize < 0) {
      // 32 * chunkSize in a previous recursion might have overflowed the int range
      throw new IllegalStateException("Overflow in rescaling chunkSize. File way too large?");
   }
   final long numChunks = (fileLength % chunkSize == 0) ? (fileLength / chunkSize) : (fileLength / chunkSize) + 1;
   if (numChunks > Integer.MAX_VALUE) {
      log.rescalingChunksize(fileName, fileLength, chunkSize);
      chunkSize = 32 * chunkSize;
      return figureChunksNumber(fileName, fileLength, chunkSize);
   }
   else {
      return (int) numChunks;
   }
}
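// A minimal sketch (an illustrative standalone check, not part of the original
// class) exercising the chunk-count arithmetic above; run with -ea to enable
// the assertions.
public static void main(String[] args) {
   final long fiveGb = 5L * 1024 * 1024 * 1024;
   // 5 GB split into 16 KB chunks: 5_368_709_120 / 16_384 = 327_680 chunks
   assert figureChunksNumber("_0.cfs", fiveGb, 16 * 1024) == 327_680;
   // A length that is not an exact multiple of chunkSize needs one extra chunk
   assert figureChunksNumber("_0.cfs", fiveGb + 1, 16 * 1024) == 327_681;
}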
/**
 * Load some keys in the collector, excluding some and up to a maximum number of collected
 * (non-excluded) keys.
 *
 * @param keysCollector the set to add loaded keys to
 * @param keysToExclude keys which should not be loaded. Warning: can be null! A null value
 *           means all keys are to be returned
 * @param maxElements upper limit for the collection
 */
private void loadSomeKeys(final HashSet<IndexScopedKey> keysCollector, final Set<IndexScopedKey> keysToExclude, final int maxElements) {
   if (maxElements <= 0) {
      return;
   }
   int collectedKeys = 0;
   // First we collect the (single) FileListCacheKey
   FileListCacheKey rootKey = new FileListCacheKey(indexName);
   if (keysToExclude == null || !keysToExclude.contains(rootKey)) { // unless it was excluded
      if (keysCollector.add(rootKey)) { // unless it was already collected
         collectedKeys++;
      }
   }
   try {
      // Now we collect all FileCacheKey (keys for file metadata)
      String[] listAll = directory.listAll();
      for (String fileName : listAll) {
         if (collectedKeys >= maxElements) return;
         FileCacheKey key = new FileCacheKey(indexName, fileName);
         if (keysToExclude == null || !keysToExclude.contains(key)) {
            if (keysCollector.add(key)) {
               if (++collectedKeys >= maxElements) return;
            }
         }
      }
      // Next we load the ChunkCacheKey (keys for file contents)
      for (String fileName : listAll) {
         final int numChunksInt = figureChunksNumber(fileName, directory.fileLength(fileName), autoChunkSize);
         for (int i = 0; i < numChunksInt; i++) {
            // Inner loop: there are usually several chunks per file name
            ChunkCacheKey key = new ChunkCacheKey(indexName, fileName, i, autoChunkSize);
            if (keysToExclude == null || !keysToExclude.contains(key)) {
               if (keysCollector.add(key)) {
                  if (++collectedKeys >= maxElements) return;
               }
            }
         }
      }
   } catch (IOException e) {
      throw log.exceptionInCacheLoader(e);
   }
}
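// A minimal usage sketch, assuming it lives in the same class as loadSomeKeys;
// the method name and the Integer.MAX_VALUE "no limit" convention are
// illustrative, not part of the original API.
void loadAllKeysSketch(final Set<IndexScopedKey> keysToExclude) {
   final HashSet<IndexScopedKey> keys = new HashSet<IndexScopedKey>();
   // Collect every non-excluded key for this index in a single pass
   loadSomeKeys(keys, keysToExclude, Integer.MAX_VALUE);
}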