@Override
protected HazelcastClientCacheManager createHazelcastCacheManager(URI uri, ClassLoader classLoader,
                                                                   Properties properties) {
    final HazelcastInstance instance;
    // if the uri is null, the default URI, or a non-Hazelcast one, use the internal shared instance
    if (uri == null || uri.equals(getDefaultURI())) {
        if (hazelcastInstance == null) {
            try {
                hazelcastInstance = instanceFromProperties(classLoader, properties, true);
            } catch (Exception e) {
                throw ExceptionUtil.rethrow(e);
            }
        }
        instance = hazelcastInstance;
    } else {
        try {
            instance = instanceFromProperties(classLoader, properties, false);
            if (instance == null) {
                throw new IllegalArgumentException(INVALID_HZ_INSTANCE_SPECIFICATION_MESSAGE);
            }
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }
    }
    return new HazelcastClientCacheManager(this, instance, uri, classLoader, properties);
}
public Node(HazelcastInstanceImpl hazelcastInstance, Config config, NodeContext nodeContext) {
    this.hazelcastInstance = hazelcastInstance;
    this.config = config;
    this.configClassLoader = config.getClassLoader();
    this.groupProperties = new GroupProperties(config);
    this.buildInfo = BuildInfoProvider.getBuildInfo();

    String loggingType = groupProperties.getString(GroupProperty.LOGGING_TYPE);
    loggingService = new LoggingServiceImpl(config.getGroupConfig().getName(), loggingType, buildInfo);

    final AddressPicker addressPicker = nodeContext.createAddressPicker(this);
    try {
        addressPicker.pickAddress();
    } catch (Throwable e) {
        throw ExceptionUtil.rethrow(e);
    }

    final ServerSocketChannel serverSocketChannel = addressPicker.getServerSocketChannel();
    try {
        address = addressPicker.getPublicAddress();
        final Map<String, Object> memberAttributes = findMemberAttributes(config.getMemberAttributeConfig().asReadOnly());
        localMember = new MemberImpl(address, true, createMemberUuid(address), hazelcastInstance, memberAttributes);
        loggingService.setThisMember(localMember);
        logger = loggingService.getLogger(Node.class.getName());
        hazelcastThreadGroup = new HazelcastThreadGroup(hazelcastInstance.getName(), logger, configClassLoader);

        nodeExtension = NodeExtensionFactory.create(configClassLoader);
        nodeExtension.beforeStart(this);

        serializationService = nodeExtension.createSerializationService();
        securityContext = config.getSecurityConfig().isEnabled() ? nodeExtension.getSecurityContext() : null;

        nodeEngine = new NodeEngineImpl(this);
        clientEngine = new ClientEngineImpl(this);
        connectionManager = nodeContext.createConnectionManager(this, serverSocketChannel);
        partitionService = new InternalPartitionServiceImpl(this);
        clusterService = new ClusterServiceImpl(this);
        textCommandService = new TextCommandServiceImpl(this);
        nodeExtension.printNodeInfo(this);
        multicastService = createMulticastService(addressPicker.getBindAddress(), this, config, logger);
        discoveryService = createDiscoveryService(config);
        initializeListeners(config);
        joiner = nodeContext.createJoiner(this);
    } catch (Throwable e) {
        try {
            serverSocketChannel.close();
        } catch (Throwable ignored) {
        }
        throw ExceptionUtil.rethrow(e);
    }
}
protected Object getInternal(K key, ExpiryPolicy expiryPolicy, boolean async) {
    ensureOpen();
    validateNotNull(key);
    final Data keyData = toData(key);
    Object cached = getFromNearCache(keyData, async);
    if (cached != null) {
        return cached;
    }
    final Data expiryPolicyData = toData(expiryPolicy);
    ClientMessage request = CacheGetCodec.encodeRequest(nameWithPrefix, keyData, expiryPolicyData);
    ClientInvocationFuture future;
    try {
        final int partitionId = clientContext.getPartitionService().getPartitionId(key);
        final HazelcastClientInstanceImpl client = (HazelcastClientInstanceImpl) clientContext.getHazelcastInstance();
        final ClientInvocation clientInvocation = new ClientInvocation(client, request, partitionId);
        future = clientInvocation.invoke();
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
    SerializationService serializationService = clientContext.getSerializationService();
    ClientDelegatingFuture<V> delegatingFuture =
            new ClientDelegatingFuture<V>(future, serializationService, cacheGetResponseDecoder);
    if (async) {
        if (nearCache != null) {
            delegatingFuture.andThenInternal(new ExecutionCallback<Data>() {
                public void onResponse(Data valueData) {
                    storeInNearCache(keyData, valueData, null);
                }

                public void onFailure(Throwable t) {
                }
            });
        }
        return delegatingFuture;
    } else {
        try {
            Object value = delegatingFuture.get();
            if (nearCache != null) {
                storeInNearCache(keyData, (Data) delegatingFuture.getResponse(), null);
            }
            if (!(value instanceof Data)) {
                return value;
            } else {
                return serializationService.toObject(value);
            }
        } catch (Throwable e) {
            throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }
}
protected void updateCacheListenerConfigOnOtherNodes(CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration,
                                                     boolean isRegister) {
    final Collection<Member> members = clientContext.getClusterService().getMemberList();
    final HazelcastClientInstanceImpl client = (HazelcastClientInstanceImpl) clientContext.getHazelcastInstance();
    final Collection<Future> futures = new ArrayList<Future>();
    for (Member member : members) {
        try {
            final Address address = member.getAddress();
            Data configData = toData(cacheEntryListenerConfiguration);
            final ClientMessage request = CacheListenerRegistrationCodec.encodeRequest(nameWithPrefix, configData,
                    isRegister, address.getHost(), address.getPort());
            final ClientInvocation invocation = new ClientInvocation(client, request, address);
            final Future future = invocation.invoke();
            futures.add(future);
        } catch (Exception e) {
            ExceptionUtil.sneakyThrow(e);
        }
    }
    // make sure all configs are created
    // TODO do we need this ???
    // try {
    //     FutureUtil.waitWithDeadline(futures, CacheProxyUtil.AWAIT_COMPLETION_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    // } catch (TimeoutException e) {
    //     logger.warning(e);
    // }
}
private SerializationServiceImpl initSerializationService(ClientConfig config) {
    SerializationService ss;
    try {
        String partitioningStrategyClassName = System.getProperty(GroupProperties.PROP_PARTITIONING_STRATEGY_CLASS);
        final PartitioningStrategy partitioningStrategy;
        if (partitioningStrategyClassName != null && partitioningStrategyClassName.length() > 0) {
            partitioningStrategy = ClassLoaderUtil.newInstance(config.getClassLoader(), partitioningStrategyClassName);
        } else {
            partitioningStrategy = new DefaultPartitioningStrategy();
        }
        ss = new SerializationServiceBuilder()
                .setManagedContext(new HazelcastClientManagedContext(this, config.getManagedContext()))
                .setClassLoader(config.getClassLoader())
                .setConfig(config.getSerializationConfig())
                .setPartitioningStrategy(partitioningStrategy)
                .build();
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
    return (SerializationServiceImpl) ss;
}
public InetAddress getInetAddress() {
    try {
        return thisAddress.getInetAddress();
    } catch (UnknownHostException e) {
        throw ExceptionUtil.rethrow(e);
    }
}
protected void sleep() {
    try {
        TimeUnit.MILLISECONDS.sleep(WAIT_PARTITION_TABLE_UPDATE_MILLIS);
    } catch (InterruptedException e) {
        throw ExceptionUtil.rethrow(e);
    }
}
public InetAddress getInetAddress() {
    try {
        return localEndpoint.getInetAddress();
    } catch (UnknownHostException e) {
        throw ExceptionUtil.rethrow(e);
    }
}
protected void removeAllKeysInternal(Set<? extends K> keys) {
    final long start = System.nanoTime();
    final Set<Data> keysData = new HashSet<Data>();
    for (K key : keys) {
        keysData.add(toData(key));
    }
    final int partitionCount = clientContext.getPartitionService().getPartitionCount();
    final int completionId = nextCompletionId();
    registerCompletionLatch(completionId, partitionCount);
    ClientMessage request = CacheRemoveAllKeysCodec.encodeRequest(nameWithPrefix, keysData, completionId);
    try {
        invoke(request);
        waitCompletionLatch(completionId, null);
        if (statisticsEnabled) {
            // Actually we don't know how many of them are really removed or not.
            // We just assume that if there is no exception, all of them are removed.
            // Otherwise (if there is an exception), we don't update any cache stats about remove.
            statistics.increaseCacheRemovals(keysData.size());
            statistics.addRemoveTimeNanos(System.nanoTime() - start);
        }
    } catch (Throwable t) {
        deregisterCompletionLatch(completionId);
        throw ExceptionUtil.rethrowAllowedTypeFirst(t, CacheException.class);
    }
}
protected <T> ICompletableFuture<T> getAndRemoveAsyncInternal(K key, boolean withCompletionEvent, boolean async) {
    final long start = System.nanoTime();
    ensureOpen();
    validateNotNull(key);
    CacheProxyUtil.validateConfiguredTypes(cacheConfig, key);
    final Data keyData = toData(key);
    final int completionId = withCompletionEvent ? nextCompletionId() : -1;
    ClientMessage request = CacheGetAndRemoveCodec.encodeRequest(nameWithPrefix, keyData, completionId);
    ClientInvocationFuture future;
    try {
        future = invoke(request, keyData, completionId);
        invalidateNearCache(keyData);
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
    ClientDelegatingFuture delegatingFuture =
            new ClientDelegatingFuture<T>(future, clientContext.getSerializationService(), getAndRemoveResponseDecoder);
    if (async && statisticsEnabled) {
        delegatingFuture.andThen(new ExecutionCallback<Object>() {
            public void onResponse(Object responseData) {
                Object response = clientContext.getSerializationService().toObject(responseData);
                handleStatisticsOnRemove(true, start, response);
            }

            public void onFailure(Throwable t) {
            }
        });
    }
    return delegatingFuture;
}
protected <T> T getSafely(Future<T> future) {
    try {
        return future.get();
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
@Override
public ICompletableFuture<Long> addAllAsync(Collection<? extends E> collection, OverflowPolicy overflowPolicy) {
    checkNotNull(collection, "collection can't be null");
    checkNotNull(overflowPolicy, "overflowPolicy can't be null");
    checkFalse(collection.isEmpty(), "collection can't be empty");
    checkTrue(collection.size() <= MAX_BATCH_SIZE, "collection can't be larger than " + MAX_BATCH_SIZE);

    final List<Data> valueList = new ArrayList<Data>(collection.size());
    for (E e : collection) {
        throwExceptionIfNull(e);
        valueList.add(toData(e));
    }
    ClientMessage request = RingbufferAddAllAsyncCodec.encodeRequest(name, valueList, overflowPolicy.getId());
    request.setPartitionId(partitionId);
    try {
        ClientInvocationFuture invocationFuture = new ClientInvocation(getClient(), request).invoke();
        return new ClientDelegatingFuture<Long>(invocationFuture, getContext().getSerializationService(),
                ADD_ALL_ASYNC_RESPONSE_DECODER);
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
}
@Override
public void initialize() {
    final NodeEngine nodeEngine = getNodeEngine();
    CollectionConfig config = getConfig(nodeEngine);
    final List<ItemListenerConfig> itemListenerConfigs = config.getItemListenerConfigs();
    for (ItemListenerConfig itemListenerConfig : itemListenerConfigs) {
        ItemListener listener = itemListenerConfig.getImplementation();
        if (listener == null && itemListenerConfig.getClassName() != null) {
            try {
                listener = ClassLoaderUtil.newInstance(nodeEngine.getConfigClassLoader(),
                        itemListenerConfig.getClassName());
            } catch (Exception e) {
                throw ExceptionUtil.rethrow(e);
            }
        }
        if (listener != null) {
            if (listener instanceof HazelcastInstanceAware) {
                ((HazelcastInstanceAware) listener).setHazelcastInstance(nodeEngine.getHazelcastInstance());
            }
            addItemListener(listener, itemListenerConfig.isIncludeValue());
        }
    }
}
private void enableStatisticManagementOnNodes(String cacheName, boolean statOrMan, boolean enabled) {
    if (isClosed()) {
        throw new IllegalStateException();
    }
    if (cacheName == null) {
        throw new NullPointerException();
    }
    final ClientInvocationService invocationService = clientContext.getInvocationService();
    final Collection<MemberImpl> members = clientContext.getClusterService().getMemberList();
    final Collection<Future> futures = new ArrayList<Future>();
    for (MemberImpl member : members) {
        try {
            CacheManagementConfigRequest request = new CacheManagementConfigRequest(getCacheNameWithPrefix(cacheName),
                    statOrMan, enabled, member.getAddress());
            final Future future = invocationService.invokeOnTarget(request, member.getAddress());
            futures.add(future);
        } catch (Exception e) {
            ExceptionUtil.sneakyThrow(e);
        }
    }
    // make sure all configs are created
    try {
        FutureUtil.waitWithDeadline(futures, CacheProxyUtil.AWAIT_COMPLETION_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        logger.warning(e);
    }
}
public void commit() throws TransactionException, IllegalStateException {
    try {
        if (transactionType.equals(TransactionType.TWO_PHASE) && state != PREPARED) {
            throw new IllegalStateException("Transaction is not prepared");
        }
        if (transactionType.equals(TransactionType.LOCAL) && state != ACTIVE) {
            throw new IllegalStateException("Transaction is not active");
        }
        checkThread();
        checkTimeout();
        try {
            final List<Future> futures = new ArrayList<Future>(txLogs.size());
            state = COMMITTING;
            for (TransactionLog txLog : txLogs) {
                futures.add(txLog.commit(nodeEngine));
            }
            for (Future future : futures) {
                try {
                    future.get(COMMIT_TIMEOUT_MINUTES, TimeUnit.MINUTES);
                } catch (Throwable e) {
                    nodeEngine.getLogger(getClass()).warning("Error during commit!", e);
                }
            }
            state = COMMITTED;
            // purge tx backup
            purgeTxBackups();
        } catch (Throwable e) {
            state = COMMIT_FAILED;
            throw ExceptionUtil.rethrow(e, TransactionException.class);
        }
    } finally {
        setThreadFlag(null);
    }
}
public void rollback() throws IllegalStateException {
    try {
        if (state == NO_TXN || state == ROLLED_BACK) {
            throw new IllegalStateException("Transaction is not active");
        }
        checkThread();
        state = ROLLING_BACK;
        try {
            rollbackTxBackup();
            final List<Future> futures = new ArrayList<Future>(txLogs.size());
            final ListIterator<TransactionLog> iter = txLogs.listIterator(txLogs.size());
            while (iter.hasPrevious()) {
                final TransactionLog txLog = iter.previous();
                futures.add(txLog.rollback(nodeEngine));
            }
            for (Future future : futures) {
                try {
                    future.get(ROLLBACK_TIMEOUT_MINUTES, TimeUnit.MINUTES);
                } catch (Throwable e) {
                    nodeEngine.getLogger(getClass()).warning("Error during rollback!", e);
                }
            }
            // purge tx backup
            purgeTxBackups();
        } catch (Throwable e) {
            throw ExceptionUtil.rethrow(e);
        } finally {
            state = ROLLED_BACK;
        }
    } finally {
        setThreadFlag(null);
    }
}
public ClientProxy getOrCreateProxy(String service, String id) {
    final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
    ClientProxyFuture proxyFuture = proxies.get(ns);
    if (proxyFuture != null) {
        return proxyFuture.get();
    }
    final ClientProxyFactory factory = proxyFactories.get(service);
    if (factory == null) {
        throw new IllegalArgumentException("No factory registered for service: " + service);
    }
    final ClientProxy clientProxy = factory.create(id);
    proxyFuture = new ClientProxyFuture();
    final ClientProxyFuture current = proxies.putIfAbsent(ns, proxyFuture);
    if (current != null) {
        // another thread registered a future for this namespace first; use its proxy
        return current.get();
    }
    try {
        initialize(clientProxy);
    } catch (Exception e) {
        proxies.remove(ns);
        proxyFuture.set(e);
        throw ExceptionUtil.rethrow(e);
    }
    proxyFuture.set(clientProxy);
    return clientProxy;
}
private <T> T invoke(Object req) {
    try {
        return getContext().getInvocationService().invokeOnKeyOwner(req, getKeyData());
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e);
    }
}
Joiner createJoiner() {
    JoinConfig join = config.getNetworkConfig().getJoin();
    join.verify();

    if (groupProperties.getBoolean(GroupProperty.DISCOVERY_SPI_ENABLED)) {
        // TODO: Auto-Upgrade Multicast+AWS configuration!
        logger.info("Activating Discovery SPI Joiner");
        return new DiscoveryJoiner(this, discoveryService);
    } else {
        if (join.getMulticastConfig().isEnabled() && multicastService != null) {
            logger.info("Creating MulticastJoiner");
            return new MulticastJoiner(this);
        } else if (join.getTcpIpConfig().isEnabled()) {
            logger.info("Creating TcpIpJoiner");
            return new TcpIpJoiner(this);
        } else if (join.getAwsConfig().isEnabled()) {
            Class clazz;
            try {
                logger.info("Creating AWSJoiner");
                clazz = Class.forName("com.hazelcast.cluster.impl.TcpIpJoinerOverAWS");
                Constructor constructor = clazz.getConstructor(Node.class);
                return (Joiner) constructor.newInstance(this);
            } catch (Exception e) {
                throw ExceptionUtil.rethrow(e);
            }
        }
    }
    return null;
}
public int size() {
    checkTransactionState();
    try {
        final OperationService operationService = getNodeEngine().getOperationService();
        final Map<Integer, Object> results = operationService.invokeOnAllPartitions(
                MultiMapService.SERVICE_NAME,
                new MultiMapOperationFactory(name, MultiMapOperationFactory.OperationFactoryType.SIZE));
        int size = 0;
        for (Object obj : results.values()) {
            if (obj == null) {
                continue;
            }
            Integer result = getNodeEngine().toObject(obj);
            size += result;
        }
        for (Data key : txMap.keySet()) {
            MultiMapTransactionLog log = (MultiMapTransactionLog) tx.getTransactionLog(getTxLogKey(key));
            if (log != null) {
                size += log.size();
            }
        }
        return size;
    } catch (Throwable t) {
        throw ExceptionUtil.rethrow(t);
    }
}
protected Set<Data> keySetInternal() {
    final NodeEngine nodeEngine = getNodeEngine();
    try {
        Map<Integer, Object> results = nodeEngine.getOperationService().invokeOnAllPartitions(
                CollectionService.SERVICE_NAME,
                new MultiMapOperationFactory(proxyId, OperationFactoryType.KEY_SET));
        Set<Data> keySet = new HashSet<Data>();
        for (Object result : results.values()) {
            if (result == null) {
                continue;
            }
            CollectionResponse response = nodeEngine.toObject(result);
            if (response.getCollection() != null) {
                keySet.addAll(response.getCollection());
            }
        }
        return keySet;
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
private void _sendAndHandle(ConnectionFactory connectionFactory, Object obj, ResponseHandler handler)
        throws IOException {
    ResponseStream stream = null;
    while (stream == null) {
        if (!active) {
            throw new HazelcastInstanceNotActiveException();
        }
        Connection conn = null;
        try {
            conn = connectionFactory.create();
            final SerializationService serializationService = getSerializationService();
            final Data request = serializationService.toData(obj);
            conn.write(request);
            stream = new ResponseStreamImpl(serializationService, conn);
        } catch (Exception e) {
            if (e instanceof IOException) {
                if (logger.isFinestEnabled()) {
                    logger.finest("Error on connection... conn: " + conn + ", error: " + e);
                }
            }
            if (conn != null) {
                IOUtil.closeResource(conn);
            }
            if (ErrorHandler.isRetryable(e)) {
                if (redoOperation || obj instanceof RetryableRequest) {
                    if (logger.isFinestEnabled()) {
                        logger.finest("Retrying " + obj + ", last-conn: " + conn + ", last-error: " + e);
                    }
                    beforeRetry();
                    continue;
                }
            }
            if (e instanceof IOException && !active) {
                continue;
            }
            throw ExceptionUtil.rethrow(e, IOException.class);
        }
    }
    try {
        handler.handle(stream);
    } catch (Exception e) {
        throw ExceptionUtil.rethrow(e, IOException.class);
    } finally {
        stream.end();
    }
}
protected CollectionResponse removeInternal(Data dataKey) {
    try {
        RemoveAllOperation operation = new RemoveAllOperation(proxyId, dataKey, getThreadId());
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected CollectionResponse getAllInternal(Data dataKey) {
    try {
        GetAllOperation operation = new GetAllOperation(proxyId, dataKey);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Boolean removeInternal(Data dataKey, Data dataValue) {
    try {
        RemoveOperation operation = new RemoveOperation(proxyId, dataKey, getThreadId(), dataValue);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Boolean putInternal(Data dataKey, Data dataValue, int index) {
    try {
        PutOperation operation = new PutOperation(proxyId, dataKey, getThreadId(), dataValue, index);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Integer indexOfInternal(Data dataKey, Data value, boolean last) {
    try {
        IndexOfOperation operation = new IndexOfOperation(proxyId, dataKey, value, last);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Object setInternal(Data dataKey, int index, Data dataValue) {
    try {
        SetOperation operation = new SetOperation(proxyId, dataKey, getThreadId(), index, dataValue);
        return invokeData(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Boolean containsAllInternal(Data dataKey, Set<Data> dataSet) {
    try {
        ContainsAllOperation operation = new ContainsAllOperation(proxyId, dataKey, dataSet);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
protected Boolean containsInternalList(Data dataKey, Data dataValue) {
    try {
        ContainsOperation operation = new ContainsOperation(proxyId, dataKey, dataValue);
        return invoke(operation, dataKey);
    } catch (Throwable throwable) {
        throw ExceptionUtil.rethrow(throwable);
    }
}
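Every snippet above funnels failures through Hazelcast's ExceptionUtil so that checked exceptions never leak past these methods. As a rough illustration of that calling convention only, the sketch below shows a minimal rethrow helper; the class name ExceptionUtilSketch and its wrapping behavior are assumptions for illustration and are not the real Hazelcast implementation (which also provides sneakyThrow and a two-argument rethrow used above).

// Illustrative sketch only -- NOT com.hazelcast.util.ExceptionUtil.
// It mimics the shape used above: the methods declare a RuntimeException return type
// so call sites can be written as "throw ExceptionUtilSketch.rethrow(e);" and the
// compiler treats the statement as terminating, even though the helper always throws.
public final class ExceptionUtilSketch {

    private ExceptionUtilSketch() {
    }

    public static RuntimeException rethrow(Throwable t) {
        if (t instanceof Error) {
            throw (Error) t;
        }
        if (t instanceof RuntimeException) {
            throw (RuntimeException) t;
        }
        // Assumption: checked exceptions are wrapped in an unchecked exception here.
        throw new RuntimeException(t);
    }

    public static <T extends Throwable> RuntimeException rethrowAllowedTypeFirst(Throwable t, Class<T> allowedType) throws T {
        // Rethrow the allowed type as-is (e.g. CacheException above), everything else via rethrow().
        if (allowedType.isInstance(t)) {
            throw allowedType.cast(t);
        }
        return rethrow(t);
    }
}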