@Test
public void testWaitForNoMoreSplits() throws Exception {
  SourceOperator operator = createExchangeOperator();

  // add a buffer location containing one page and close the buffer
  operator.addSplit(newRemoteSplit(TASK_1_ID));
  // add the page and close the buffer
  taskBuffers.getUnchecked(TASK_1_ID).addPages(1, true);

  // read page
  waitForPages(operator, 1);

  // verify state
  assertEquals(operator.isFinished(), false);
  assertEquals(operator.needsInput(), false);
  assertEquals(operator.getOutput(), null);

  // add a buffer location
  operator.addSplit(newRemoteSplit(TASK_2_ID));
  // set no more splits (buffer locations)
  operator.noMoreSplits();
  // add two pages and close the last buffer
  taskBuffers.getUnchecked(TASK_2_ID).addPages(2, true);

  // read all pages
  waitForPages(operator, 2);

  // wait for finished
  waitForFinished(operator);
}
@Test
public void testFinish() throws Exception {
  SourceOperator operator = createExchangeOperator();

  operator.addSplit(newRemoteSplit(TASK_1_ID));
  operator.addSplit(newRemoteSplit(TASK_2_ID));
  operator.addSplit(newRemoteSplit(TASK_3_ID));
  operator.noMoreSplits();

  // add pages and leave buffers open
  taskBuffers.getUnchecked(TASK_1_ID).addPages(1, false);
  taskBuffers.getUnchecked(TASK_2_ID).addPages(1, false);
  taskBuffers.getUnchecked(TASK_3_ID).addPages(1, false);

  // read 3 pages
  waitForPages(operator, 3);

  // verify state
  assertEquals(operator.isFinished(), false);
  assertEquals(operator.needsInput(), false);
  assertEquals(operator.getOutput(), null);

  // finish without closing buffers
  operator.finish();

  // wait for finished
  waitForFinished(operator);
}
@Override
public void write(E entity) {
  Preconditions.checkState(
      state.equals(ReaderWriterState.OPEN),
      "Attempt to write to a writer in state:%s", state);

  accessor.keyFor(entity, provided, reusedKey);

  DatasetWriter<E> writer = cachedWriters.getIfPresent(reusedKey);
  if (writer == null) {
    // avoid checking on every write whether the entity belongs in the view by
    // only checking when a new writer is created
    Preconditions.checkArgument(
        view.includes(entity),
        "View %s does not include entity %s", view, entity);
    // get a new key because it is stored in the cache
    StorageKey key = StorageKey.copy(reusedKey);
    try {
      writer = cachedWriters.getUnchecked(key);
    } catch (UncheckedExecutionException ex) {
      throw new IllegalArgumentException(
          "Problem creating view for entity: " + entity, ex.getCause());
    }
  }

  writer.write(entity);
}
@Override
public FileObject getFileForOutput(
    Location location, String packageName, String relativeName, FileObject sibling)
    throws IOException {
  URI uri = uriForFileObject(location, packageName, relativeName);
  return inMemoryFileObjects.getUnchecked(uri);
}
public void unlock(Object name) {
  Monitor lock;
  synchronized (stringLocks) {
    lock = stringLocks.getUnchecked(name);
  }
  lock.leave();
}
public boolean tryLock(Object name) {
  Monitor lock;
  synchronized (stringLocks) {
    lock = stringLocks.getUnchecked(name);
  }
  return lock.tryEnter();
}
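The unlock and tryLock methods above rely on a name-keyed cache of Guava Monitor objects. A minimal sketch of how such a stringLocks cache might be declared with CacheBuilder and CacheLoader follows; the field name and the lack of eviction are illustrative assumptions, not taken from the original class.

// Sketch only: one Monitor per lock name, created lazily on first access.
// Requires com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
// and com.google.common.util.concurrent.Monitor.
private final LoadingCache<Object, Monitor> stringLocks =
    CacheBuilder.newBuilder()
        .build(new CacheLoader<Object, Monitor>() {
          @Override
          public Monitor load(Object name) {
            return new Monitor();
          }
        });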
@VisibleForTesting
public CxxPreprocessAndCompile createPreprocessBuildRule(String name, CxxSource source) {
  Preconditions.checkArgument(CxxSourceTypes.isPreprocessableType(source.getType()));

  BuildTarget target = createPreprocessBuildTarget(name, source.getType());
  PreprocessorDelegate preprocessorDelegate = preprocessorDelegates.getUnchecked(
      PreprocessAndCompilePreprocessorDelegateKey.of(source.getType(), source.getFlags()));

  // Build the CxxCompile rule and add it to our sorted set of build rules.
  CxxPreprocessAndCompile result = CxxPreprocessAndCompile.preprocess(
      getParams()
          .copyWithChanges(
              target,
              new DepsBuilder()
                  .addPreprocessDeps()
                  .add(preprocessorDelegate.getPreprocessor())
                  .add(source),
              Suppliers.ofInstance(ImmutableSortedSet.<BuildRule>of())),
      getPathResolver(),
      preprocessorDelegate,
      new CompilerDelegate(
          getPathResolver(),
          getCxxPlatform().getDebugPathSanitizer(),
          getCompiler(source.getType()),
          computeCompilerFlags(source.getType(), source.getFlags())),
      getPreprocessOutputPath(target, source.getType(), name),
      source.getPath(),
      source.getType(),
      getCxxPlatform().getDebugPathSanitizer());
  getResolver().addToIndex(result);
  return result;
}
@VisibleForTesting
public Set<String> getSecurityGroupsForTagAndOptions(
    String region, @Nullable String group, TemplateOptions options) {
  Builder<String> groups = ImmutableSet.builder();

  if (group != null) {
    String markerGroup = namingConvention.create().sharedNameForGroup(group);
    groups.add(markerGroup);

    RegionNameAndIngressRules regionNameAndIngressRulesForMarkerGroup;
    if (userSpecifiedTheirOwnGroups(options)) {
      regionNameAndIngressRulesForMarkerGroup =
          new RegionNameAndIngressRules(region, markerGroup, new int[] {}, false);
      groups.addAll(EC2TemplateOptions.class.cast(options).getGroups());
    } else {
      regionNameAndIngressRulesForMarkerGroup =
          new RegionNameAndIngressRules(region, markerGroup, options.getInboundPorts(), true);
    }

    // this will create the security group if it does not yet exist
    securityGroupMap.getUnchecked(regionNameAndIngressRulesForMarkerGroup);
  }
  return groups.build();
}
@Override
public Response<List<Address>> ancestorOfAddresses(Integer anyId) {
  Response<List<Address>> result = new Response<List<Address>>();
  if (anyId == null) {
    log.error("id can not be null");
    result.setError("params.not.null");
    return result;
  }
  List<Address> addresses = Lists.newArrayListWithExpectedSize(3);
  try {
    Integer id = anyId;
    while (id > 0) {
      Address address = addressCache.getUnchecked(id);
      addresses.add(address);
      id = address.getParentId();
    }
    result.setResult(addresses);
    return result;
  } catch (Exception e) {
    log.error(
        "failed to find ancestors of address(id={}), cause:{}",
        anyId, Throwables.getStackTraceAsString(e));
    result.setError("ancestor.query.fail");
    return result;
  }
}
private <T> void evaluateBundle(
    final AppliedPTransform<?, ?, ?> transform,
    final CommittedBundle<T> bundle,
    final CompletionCallback onComplete) {
  TransformExecutorService transformExecutor;

  if (isKeyed(bundle.getPCollection())) {
    final StepAndKey stepAndKey = StepAndKey.of(transform, bundle.getKey());
    // This executor will remain reachable until it has executed all scheduled transforms.
    // The TransformExecutors keep a strong reference to the Executor, and the ExecutorService
    // keeps a reference to the scheduled TransformExecutor callable. Follow-up
    // TransformExecutors (scheduled due to the completion of another TransformExecutor)
    // are provided to the ExecutorService before the earlier TransformExecutor callable
    // completes.
    transformExecutor = executorServices.getUnchecked(stepAndKey);
  } else {
    transformExecutor = parallelExecutorService;
  }

  Collection<ModelEnforcementFactory> enforcements =
      MoreObjects.firstNonNull(
          transformEnforcements.get(transform.getTransform().getClass()),
          Collections.<ModelEnforcementFactory>emptyList());

  TransformExecutor<T> callable =
      TransformExecutor.create(
          evaluationContext,
          registry,
          enforcements,
          bundle,
          transform,
          onComplete,
          transformExecutor);
  outstandingWork.incrementAndGet();
  transformExecutor.schedule(callable);
}
@Override
public User find(String username) {
  User user = cache.getUnchecked(username);
  if (user.getUsername() == null) {
    return null;
  }
  return user;
}
protected void checkConsistent(
    Map<String, InputStream> map,
    LoadingCache<String, Node> store,
    String key,
    String id,
    String name) throws IOException {
  assertEquals(map.size(), 1);
  if (store.size() == 0) {
    store.getUnchecked(key);
  }
  assertEquals(store.size(), 1);
  // checkRepeatedRead
  assertEquals(store.getUnchecked(key), Node.builder().id(id).name(name).build());
  assertEquals(store.getUnchecked(key), Node.builder().id(id).name(name).build());
  // checkRepeatedRead
  checkToYaml(map, key, id, name);
  checkToYaml(map, key, id, name);
}
@Override
public CompletableFuture<Boolean> remove(K key, V value) {
  checkNotNull(key, ERROR_NULL_KEY);
  checkNotNull(value, ERROR_NULL_VALUE);
  checkIfUnmodifiable();
  return database
      .remove(name, keyCache.getUnchecked(key), serializer.encode(value))
      .thenApply(this::unwrapResult);
}
private Slice getColumnSlice(
    TpchTableHandle tableHandle, TpchColumnHandle columnHandle, BlocksFileEncoding encoding) {
  checkNotNull(tableHandle, "tableHandle is null");
  checkNotNull(columnHandle, "columnHandle is null");
  checkNotNull(encoding, "encoding is null");

  File columnFile = tpchDataFileLoader.getDataFile(tableHandle, columnHandle, encoding);
  return mappedFileCache.getUnchecked(columnFile.getAbsolutePath());
}
@Override
public ServiceDiscovered discover(final String name) {
  for (DiscoverableProgramType type : DiscoverableProgramType.values()) {
    if (type.isPrefixOf(name)) {
      return clients.getUnchecked(name).discover(name);
    }
  }
  return delegate.discover(name);
}
@Override
public CompletableFuture<Boolean> replace(K key, V oldValue, V newValue) {
  checkNotNull(key, ERROR_NULL_KEY);
  checkNotNull(newValue, ERROR_NULL_VALUE);
  checkIfUnmodifiable();
  byte[] existing = oldValue != null ? serializer.encode(oldValue) : null;
  return database
      .replace(name, keyCache.getUnchecked(key), existing, serializer.encode(newValue))
      .thenApply(this::unwrapResult);
}
@Override
public CompletableFuture<Versioned<V>> get(K key) {
  checkNotNull(key, ERROR_NULL_KEY);
  return database
      .get(name, keyCache.getUnchecked(key))
      .thenApply(
          v -> v != null
              ? new Versioned<>(serializer.decode(v.value()), v.version(), v.creationTime())
              : null);
}
public V get(K key, Errors errors) throws ErrorsException {
  Object resultOrError = delegate.getUnchecked(key);
  if (resultOrError instanceof Errors) {
    errors.merge((Errors) resultOrError);
    throw errors.toException();
  } else {
    @SuppressWarnings("unchecked") // create returned a non-error result, so this is safe
    V result = (V) resultOrError;
    return result;
  }
}
@Test
public void testSimple() throws Exception {
  SourceOperator operator = createExchangeOperator();

  operator.addSplit(newRemoteSplit(TASK_1_ID));
  operator.addSplit(newRemoteSplit(TASK_2_ID));
  operator.addSplit(newRemoteSplit(TASK_3_ID));
  operator.noMoreSplits();

  // add pages and close the buffers
  taskBuffers.getUnchecked(TASK_1_ID).addPages(10, true);
  taskBuffers.getUnchecked(TASK_2_ID).addPages(10, true);
  taskBuffers.getUnchecked(TASK_3_ID).addPages(10, true);

  // read the pages
  waitForPages(operator, 30);

  // wait for finished
  waitForFinished(operator);
}
protected void put(
    Map<String, InputStream> map,
    LoadingCache<String, Node> store,
    String key,
    String id,
    String name) {
  assertEquals(store.size(), 0);
  assertEquals(map.size(), 0);
  map.put(
      key,
      new ByteArrayInputStream(String.format("id: %s\nname: %s\n", id, name).getBytes()));
  store.getUnchecked(key);
}
public SourceOperatorFactory compileScanFilterAndProjectOperator(
    int operatorId,
    PlanNodeId sourceId,
    DataStreamProvider dataStreamProvider,
    List<ColumnHandle> columns,
    RowExpression filter,
    List<RowExpression> projections) {
  OperatorCacheKey cacheKey = new OperatorCacheKey(filter, projections, sourceId);
  return sourceOperatorFactories
      .getUnchecked(cacheKey)
      .create(operatorId, dataStreamProvider, columns);
}
private TimingResourceFilter(
    AbstractMethod abstractMethod,
    LoadingCache<String, RequestStats> requestStatsLoadingCache) {
  this.abstractMethod = abstractMethod;
  String objectName =
      new ObjectNameBuilder(
              abstractMethod.getResource().getResourceClass().getPackage().getName())
          .withProperty("type", abstractMethod.getResource().getResourceClass().getSimpleName())
          .build();
  RequestStats requestStats = requestStatsLoadingCache.getUnchecked(objectName);
  timingFilter = new TimingFilter(this.abstractMethod, requestStats);
}
public void addLogEntry(String id, LogEntry logEntry) {
  try {
    OutputStream outputStream = logStreamCache.getUnchecked(id);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    objectMapper.writer().writeValue(baos, logEntry);
    synchronized (outputStream) {
      outputStream.write(("\n" + baos.size() + "\n").getBytes());
      outputStream.write(baos.toByteArray());
    }
  } catch (Throwable ignored) {
    // do not log errors when logging
  }
}
protected void remove(
    Map<String, InputStream> map, LoadingCache<String, Node> store, String key) {
  store.invalidate(key);
  assertEquals(store.size(), 0);
  map.remove(key);
  assertEquals(map.size(), 0);
  try {
    assertEquals(store.getUnchecked(key), null);
    fail("should not work as null is invalid");
  } catch (UncheckedExecutionException e) {
    // expected: the loader cannot produce a value for the removed key
  }
  assertEquals(map.get(key), null);
}
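The remove test above expects getUnchecked to surface loader failures as UncheckedExecutionException once the backing map entry is gone. A hedged sketch of a loader with that behavior, assuming the map field used by these tests and a hypothetical parseNode helper; the actual loader in the original suite is not shown here.

// Sketch only: the loader throws when the backing map has no entry, so
// store.getUnchecked(key) rethrows the failure wrapped in UncheckedExecutionException.
LoadingCache<String, Node> store =
    CacheBuilder.newBuilder()
        .build(new CacheLoader<String, Node>() {
          @Override
          public Node load(String key) {
            InputStream in = map.get(key);
            if (in == null) {
              throw new NoSuchElementException("no backing entry for key: " + key);
            }
            return parseNode(in); // hypothetical: parses the "id"/"name" yaml into a Node
          }
        });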
@Override
public CompletableFuture<Versioned<V>> put(K key, V value) {
  checkNotNull(key, ERROR_NULL_KEY);
  checkNotNull(value, ERROR_NULL_VALUE);
  checkIfUnmodifiable();
  return database
      .put(name, keyCache.getUnchecked(key), serializer.encode(value))
      .thenApply(this::unwrapResult)
      .thenApply(
          v -> v != null
              ? new Versioned<>(serializer.decode(v.value()), v.version(), v.creationTime())
              : null);
}
@Override
public User check(String username, String password) {
  if (username == null || password == null) {
    return null;
  }
  User user = cache.getUnchecked(username);
  if (user == null || user.getUsername() == null || user.getPassword() == null) {
    return null;
  }
  String encryptPassword = generateEncryptPassword(user, password);
  if (user.getPassword().equals(encryptPassword)) {
    return user;
  }
  return null;
}
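Both find and check above read through a username-keyed user cache and treat an entry with a null username as "not found". A minimal sketch of how such a cache might be assembled, assuming a hypothetical userDao lookup and an arbitrary five-minute expiry; the original service's loader and eviction policy are not part of this excerpt.

// Sketch only: loads users by username; userDao and the TTL are illustrative assumptions.
// A CacheLoader must not return null, so a placeholder User (with null username) stands in
// for missing users, which is what the null-username checks above detect.
private final LoadingCache<String, User> cache =
    CacheBuilder.newBuilder()
        .expireAfterWrite(5, TimeUnit.MINUTES)
        .build(new CacheLoader<String, User>() {
          @Override
          public User load(String username) {
            User user = userDao.findByUsername(username); // hypothetical lookup
            return user != null ? user : new User();
          }
        });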
@Override
public CompletableFuture<Versioned<V>> remove(K key) {
  checkNotNull(key, ERROR_NULL_KEY);
  checkIfUnmodifiable();
  return database
      .remove(name, keyCache.getUnchecked(key))
      .thenApply(this::unwrapResult)
      .thenApply(v -> v != null ? v.<V>map(serializer::decode) : null)
      .whenComplete(
          (r, e) -> {
            if (r != null) {
              notifyListeners(new MapEvent<>(name, MapEvent.Type.REMOVE, key, r));
            }
          });
}
@Override
public synchronized void removeThingLogic(IThingLogic tl) {
  fireThingLogicManagerEvent(EventType.LOGIC_REMOVING, tl);
  long time = 0;
  if (DEBUG) {
    time = System.nanoTime();
  }
  logics.remove(tl);
  tl.setBNAWorld(null);
  typedLogics.remove(tl.getClass());
  if (DEBUG) {
    time = System.nanoTime() - time;
    debugStats.getUnchecked(tl).addAndGet(time);
  }
  fireThingLogicManagerEvent(EventType.LOGIC_REMOVED, tl);
}
/**
 * Request the currently configured client.
 *
 * @return the configured object storage provider client instance
 */
public static ObjectStorageProviderClient getInstance() throws NoSuchElementException {
  if (lastClient.get() == null || UNSET.equals(lastClient.get())) {
    if (!Strings.isNullOrEmpty(providerClient)) {
      if (clients.containsKey(providerClient)) {
        lastClient.set(providerClient);
      }
    } else {
      throw new NoSuchElementException(
          "OSG object storage provider client not configured. Found property"
              + " 'objectstorage.providerclient' empty or unset manager("
              + lastClient
              + "). Legal values are: "
              + Joiner.on(",").join(clients.keySet()));
    }
  }
  return clientInstances.getUnchecked(lastClient.get());
}
@Override
public CompletableFuture<Versioned<V>> putAndGet(K key, V value) {
  checkNotNull(key, ERROR_NULL_KEY);
  checkNotNull(value, ERROR_NULL_VALUE);
  checkIfUnmodifiable();
  return database
      .putAndGet(name, keyCache.getUnchecked(key), serializer.encode(value))
      .thenApply(this::unwrapResult)
      .thenApply(
          v -> {
            Versioned<byte[]> rawNewValue = v.newValue();
            return new Versioned<>(
                serializer.decode(rawNewValue.value()),
                rawNewValue.version(),
                rawNewValue.creationTime());
          });
}