@Test
public void testSubmitToKeyOwnerCallable() throws Exception {
    final int k = simpleTestNodeCount;
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k);
    final HazelcastInstance[] instances = factory.newInstances(new Config());
    final AtomicInteger count = new AtomicInteger(0);
    final CountDownLatch latch = new CountDownLatch(k / 2);
    final ExecutionCallback callback = new ExecutionCallback() {
        public void onResponse(Object response) {
            if ((Boolean) response) count.incrementAndGet();
            latch.countDown();
        }

        public void onFailure(Throwable t) {}
    };
    for (int i = 0; i < k; i++) {
        final HazelcastInstance instance = instances[i];
        final IExecutorService service = instance.getExecutorService("testSubmitToKeyOwnerCallable");
        final String script = "hazelcast.getCluster().getLocalMember().equals(member)";
        final HashMap map = new HashMap();
        final Member localMember = instance.getCluster().getLocalMember();
        map.put("member", localMember);
        // advance key until its partition is owned by the local member
        int key = 0;
        while (!localMember.equals(instance.getPartitionService().getPartition(++key).getOwner()))
            ;
        if (i % 2 == 0) {
            final Future f = service.submitToKeyOwner(new ScriptCallable(script, map), key);
            assertTrue((Boolean) f.get(5, TimeUnit.SECONDS));
        } else {
            service.submitToKeyOwner(new ScriptCallable(script, map), key, callback);
        }
    }
    assertTrue(latch.await(30, TimeUnit.SECONDS));
    assertEquals(k / 2, count.get());
}
private String resolveTypeDiscriminatorForBackReference(Member m) {
    Method me = ReflectionUtils.findMethod(
        m.getDeclaringClass(), "get" + firstCharToUpperCase(m.getName()));
    if (me != null) {
        return resolveTypeDiscriminator(resolveReturnType(me));
    }
    return "";
}
public static void handleMemberMaintenance(InstanceMaintenanceModeEvent instanceMaintenanceModeEvent)
        throws InvalidMemberException, InvalidCartridgeTypeException {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(instanceMaintenanceModeEvent.getServiceName());
    // update the status of the member
    if (service == null) {
        log.warn(String.format(
            "Service %s does not exist", instanceMaintenanceModeEvent.getServiceName()));
        return;
    }
    Cluster cluster = service.getCluster(instanceMaintenanceModeEvent.getClusterId());
    if (cluster == null) {
        log.warn(String.format(
            "Cluster %s does not exist", instanceMaintenanceModeEvent.getClusterId()));
        return;
    }
    Member member = cluster.getMember(instanceMaintenanceModeEvent.getMemberId());
    if (member == null) {
        log.warn(String.format(
            "Member %s does not exist", instanceMaintenanceModeEvent.getMemberId()));
        return;
    }
    MemberMaintenanceModeEvent memberMaintenanceModeEvent =
        new MemberMaintenanceModeEvent(
            instanceMaintenanceModeEvent.getServiceName(),
            instanceMaintenanceModeEvent.getClusterId(),
            instanceMaintenanceModeEvent.getClusterInstanceId(),
            instanceMaintenanceModeEvent.getMemberId(),
            instanceMaintenanceModeEvent.getNetworkPartitionId(),
            instanceMaintenanceModeEvent.getPartitionId());
    try {
        TopologyManager.acquireWriteLock();
        // try update lifecycle state
        if (!member.isStateTransitionValid(MemberStatus.In_Maintenance)) {
            log.error("Invalid state transition from " + member.getStatus() + " to "
                + MemberStatus.In_Maintenance);
            return;
        }
        member.setStatus(MemberStatus.In_Maintenance);
        log.info("Member status updated to In_Maintenance");
        TopologyManager.updateTopology(topology);
    } finally {
        TopologyManager.releaseWriteLock();
    }
    // publishing data
    TopologyEventPublisher.sendMemberMaintenanceModeEvent(memberMaintenanceModeEvent);
}
@Test
public void testInstance() {
    assertNotNull(instance);
    final Set<Member> members = instance.getCluster().getMembers();
    assertEquals(1, members.size());
    final Member member = members.iterator().next();
    final InetSocketAddress inetSocketAddress = member.getInetSocketAddress();
    assertEquals(5700, inetSocketAddress.getPort());
    assertEquals("test-instance", config.getInstanceName());
    assertEquals("HAZELCAST_ENTERPRISE_LICENSE_KEY", config.getLicenseKey());
}
/** Special constructor called from Class. */
NamedIterator(Vector v, Object type, int modifierMask) {
    // Using type 'Object' instead of 'java.lang.Class' for the parameter type
    // is a workaround to avoid having to generate fully qualified class names
    // when applying BeautyJ to this code. (Otherwise, it would be confused with
    // class 'Class' in this package.)
    this();
    for (Enumeration e = v.elements(); e.hasMoreElements(); ) {
        Member m = (Member) e.nextElement();
        int mod = m.getModifier();
        if (((modifierMask & mod) != 0)
            && (m.getClass().isAssignableFrom((java.lang.Class) type))) {
            data.addElement(m);
        }
    }
    reset();
    lockReadonly(); // read-only in this mode
}
/**
 * Method called to check if we can use the passed method or constructor (wrt access restriction
 * -- public methods can be called, others usually not); and if not, whether there is a
 * work-around for the problem.
 */
public static void checkAndFixAccess(Member member) {
    // We know all members are also accessible objects...
    AccessibleObject ao = (AccessibleObject) member;
    /* 14-Jan-2009, tatu: It seems safe and potentially beneficial to always
     * make it accessible (latter because it will force skipping checks we
     * have no use for...), so let's always call it.
     */
    // if (!ao.isAccessible()) {
    try {
        ao.setAccessible(true);
    } catch (SecurityException se) {
        /* 17-Apr-2009, tatu: Related to [JACKSON-101]: this can fail on
         * platforms like EJB and Google App Engine; so let's only fail
         * if we really needed it...
         */
        if (!ao.isAccessible()) {
            Class<?> declClass = member.getDeclaringClass();
            throw new IllegalArgumentException("Can not access " + member + " (from class "
                + declClass.getName() + "; failed to set access: " + se.getMessage());
        }
    }
    // }
}
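/*
 * A minimal usage sketch for checkAndFixAccess(Member): forcing access to a private
 * method before invoking it reflectively. The demo class and the local, simplified
 * copy of the helper are illustrative assumptions, not part of the original source.
 */
import java.lang.reflect.AccessibleObject;
import java.lang.reflect.Member;
import java.lang.reflect.Method;

public class AccessDemo {
    // Simplified local copy of the checkAndFixAccess(Member) helper above, so the
    // sketch compiles on its own.
    static void checkAndFixAccess(Member member) {
        AccessibleObject ao = (AccessibleObject) member;
        try {
            ao.setAccessible(true);
        } catch (SecurityException se) {
            if (!ao.isAccessible()) {
                throw new IllegalArgumentException("Can not access " + member, se);
            }
        }
    }

    private String greet() {
        return "hello";
    }

    public static void main(String[] args) throws Exception {
        Method m = AccessDemo.class.getDeclaredMethod("greet");
        checkAndFixAccess(m);
        System.out.println(m.invoke(new AccessDemo())); // prints "hello"
    }
}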
/**
 * Remove member from topology and send member terminated event.
 *
 * @param serviceName
 * @param clusterId
 * @param networkPartitionId
 * @param partitionId
 * @param memberId
 */
public static void handleMemberTerminated(
    String serviceName,
    String clusterId,
    String networkPartitionId,
    String partitionId,
    String memberId) {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(serviceName);
    Properties properties;
    if (service == null) {
        log.warn(String.format("Service %s does not exist", serviceName));
        return;
    }
    Cluster cluster = service.getCluster(clusterId);
    if (cluster == null) {
        log.warn(String.format("Cluster %s does not exist", clusterId));
        return;
    }
    Member member = cluster.getMember(memberId);
    if (member == null) {
        log.warn(String.format("Member %s does not exist", memberId));
        return;
    }
    String clusterInstanceId = member.getClusterInstanceId();
    try {
        TopologyManager.acquireWriteLock();
        properties = member.getProperties();
        cluster.removeMember(member);
        TopologyManager.updateTopology(topology);
    } finally {
        TopologyManager.releaseWriteLock();
    }
    /* @TODO leftover from grouping_poc */
    String groupAlias = null;
    TopologyEventPublisher.sendMemberTerminatedEvent(
        serviceName, clusterId, memberId, clusterInstanceId,
        networkPartitionId, partitionId, properties, groupAlias);
}
/**
 * Add member object to the topology and publish member created event.
 *
 * @param memberContext
 */
public static void handleMemberCreatedEvent(MemberContext memberContext) {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(memberContext.getCartridgeType());
    String clusterId = memberContext.getClusterId();
    Cluster cluster = service.getCluster(clusterId);
    String memberId = memberContext.getMemberId();
    String clusterInstanceId = memberContext.getClusterInstanceId();
    String networkPartitionId = memberContext.getNetworkPartitionId();
    String partitionId = memberContext.getPartition().getId();
    String lbClusterId = memberContext.getLbClusterId();
    long initTime = memberContext.getInitTime();
    if (cluster.memberExists(memberId)) {
        log.warn(String.format("Member %s already exists", memberId));
        return;
    }
    try {
        TopologyManager.acquireWriteLock();
        Member member = new Member(
            service.getServiceName(), clusterId, memberId, clusterInstanceId,
            networkPartitionId, partitionId, memberContext.getLoadBalancingIPType(), initTime);
        member.setStatus(MemberStatus.Created);
        member.setLbClusterId(lbClusterId);
        member.setProperties(CloudControllerUtil.toJavaUtilProperties(memberContext.getProperties()));
        cluster.addMember(member);
        TopologyManager.updateTopology(topology);
    } finally {
        TopologyManager.releaseWriteLock();
    }
    TopologyEventPublisher.sendMemberCreatedEvent(memberContext);
}
private void generateSetBasedDocRefView(
    Map<String, org.ektorp.support.DesignDocument.View> views,
    Member me,
    DocumentReferences referenceMetaData) {
    String fieldName = firstCharToLowerCase(me.getName());
    String orderBy = referenceMetaData.orderBy();
    String backRef = referenceMetaData.backReference();
    if (backRef.length() == 0) {
        throw new ViewGenerationException(
            String.format(
                "The DocumentReferences annotation in %s must specify a backReference",
                me.getDeclaringClass()));
    }
    String viewName = NameConventions.backReferenceViewName(fieldName);
    String typeDiscriminator = resolveTypeDiscriminatorForBackReference(me);
    if (orderBy.length() > 0) {
        views.put(
            viewName,
            generateDocRefsAsSetWithOrderByView(backRef, fieldName, orderBy, typeDiscriminator));
    } else {
        views.put(viewName, generateDocRefsAsSetView(backRef, fieldName, typeDiscriminator));
    }
}
/**
 * Parsing members means creating new members for definitions with more than one declarator.
 */
public void parse() {
    boolean clone_and_parse = true;
    if (extendVector == null) throw new RuntimeException("Compiler Error: extendVector not set!");
    if (type_spec.typeSpec() instanceof ScopedName) {
        token = type_spec.typeSpec().get_token();
        type_spec = ((ScopedName) type_spec.typeSpec()).resolvedTypeSpec();
        clone_and_parse = false;
        if (type_spec instanceof ConstrTypeSpec) {
            if (((ConstrTypeSpec) type_spec.typeSpec()).c_type_spec instanceof StructType) {
                // System.out.println("Struct " + containing_struct.typeName() + " contains struct " +
                //     ((ConstrTypeSpec)type_spec.typeSpec()).typeName());
                if (((ConstrTypeSpec) type_spec.typeSpec())
                        .c_type_spec
                        .typeName()
                        .equals(containing_struct.typeName())) {
                    parser.fatal_error(
                        "Illegal recursion in struct (use sequence<"
                            + containing_struct.typeName() + "> instead)",
                        token);
                }
            }
        }
    } else if (type_spec.typeSpec() instanceof SequenceType) {
        TypeSpec ts = ((SequenceType) type_spec.typeSpec()).elementTypeSpec().typeSpec();
        SequenceType seqTs = (SequenceType) type_spec.typeSpec();
        while (ts instanceof SequenceType) {
            seqTs = (SequenceType) ts;
            ts = ((SequenceType) ts.typeSpec()).elementTypeSpec().typeSpec();
        }
        // if( ts.typeName().equals( containing_struct.typeName()) ||
        if (ScopedName.isRecursionScope(ts.typeName())) {
            seqTs.setRecursive();
        }
    } else if (type_spec instanceof ConstrTypeSpec) {
        type_spec.parse();
    }
    for (Enumeration e = declarators.v.elements(); e.hasMoreElements(); ) {
        Declarator d = (Declarator) e.nextElement();
        // we don't parse the declarator itself
        // as that would result in its name getting defined
        // we define the declarator's name as a type name indirectly
        // through the cloned type specs.
        Member m = new Member(new_num());
        m.declarator = d;
        TypeSpec ts = type_spec.typeSpec();
        /* create a separate type spec copy for every declarator
           if the type spec is a new type definition, i.e. a struct,
           enum, union, sequence or the declarator is an array declarator */
        // if( clone_and_parse && !(ts instanceof BaseType) )
        if (clone_and_parse || d.d instanceof ArrayDeclarator) {
            /* arrays need special treatment */
            if (d.d instanceof ArrayDeclarator) {
                ts = new ArrayTypeSpec(new_num(), ts, (ArrayDeclarator) d.d, pack_name);
                ts.parse();
            } else if (!(ts instanceof BaseType)) {
                ts = (TypeSpec) ts.clone();
                if (!(ts instanceof ConstrTypeSpec)) ts.set_name(d.name());
                /* important: only parse type specs once (we do it for the last declarator only) */
                if (!e.hasMoreElements()) ts.parse();
            }
        }
        // else
        if (!(d.d instanceof ArrayDeclarator)) {
            try {
                NameTable.define(containing_struct + "." + d.name(), "declarator");
            } catch (NameAlreadyDefined nad) {
                parser.error("Declarator " + d.name() + " already defined in scope.", token);
            }
        }
        /* if the type spec is a scoped name, it is already parsed and
         * the type name is defined */
        // if( clone_and_parse )
        //     ts.parse();
        m.type_spec = ts;
        m.pack_name = this.pack_name;
        m.name = this.name;
        extendVector.addElement(m);
    }
    declarators = null;
}
public static void handleMemberActivated(InstanceActivatedEvent instanceActivatedEvent) {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(instanceActivatedEvent.getServiceName());
    if (service == null) {
        log.warn(String.format("Service %s does not exist", instanceActivatedEvent.getServiceName()));
        return;
    }
    Cluster cluster = service.getCluster(instanceActivatedEvent.getClusterId());
    if (cluster == null) {
        log.warn(String.format("Cluster %s does not exist", instanceActivatedEvent.getClusterId()));
        return;
    }
    Member member = cluster.getMember(instanceActivatedEvent.getMemberId());
    if (member == null) {
        log.warn(String.format("Member %s does not exist", instanceActivatedEvent.getMemberId()));
        return;
    }
    MemberActivatedEvent memberActivatedEvent =
        new MemberActivatedEvent(
            instanceActivatedEvent.getServiceName(),
            instanceActivatedEvent.getClusterId(),
            instanceActivatedEvent.getClusterInstanceId(),
            instanceActivatedEvent.getMemberId(),
            instanceActivatedEvent.getNetworkPartitionId(),
            instanceActivatedEvent.getPartitionId());
    // grouping - set group id
    // TODO memberActivatedEvent.setApplicationId(null);
    try {
        TopologyManager.acquireWriteLock();
        // try update lifecycle state
        if (!member.isStateTransitionValid(MemberStatus.Active)) {
            log.error("Invalid state transition from [" + member.getStatus() + "] to ["
                + MemberStatus.Active + "]");
            return;
        } else {
            member.setStatus(MemberStatus.Active);
            // Set member ports
            try {
                Cartridge cartridge =
                    CloudControllerContext.getInstance().getCartridge(service.getServiceName());
                if (cartridge == null) {
                    throw new RuntimeException(String.format(
                        "Cartridge not found: [cartridge-type] %s", service.getServiceName()));
                }
                Port port;
                int portValue;
                List<PortMapping> portMappings = Arrays.asList(cartridge.getPortMappings());
                String clusterId = cluster.getClusterId();
                ClusterContext clusterContext =
                    CloudControllerContext.getInstance().getClusterContext(clusterId);
                List<KubernetesService> kubernetesServices = clusterContext.getKubernetesServices();
                for (PortMapping portMapping : portMappings) {
                    if (kubernetesServices != null) {
                        portValue = findKubernetesServicePort(clusterId, kubernetesServices, portMapping);
                    } else {
                        portValue = portMapping.getPort();
                    }
                    port = new Port(portMapping.getProtocol(), portValue, portMapping.getProxyPort());
                    member.addPort(port);
                    memberActivatedEvent.addPort(port);
                }
            } catch (Exception e) {
                String message = String.format(
                    "Could not add member ports: [service-name] %s [member-id] %s",
                    memberActivatedEvent.getServiceName(), memberActivatedEvent.getMemberId());
                log.error(message, e);
            }
            // Set member ip addresses
            memberActivatedEvent.setDefaultPrivateIP(member.getDefaultPrivateIP());
            memberActivatedEvent.setMemberPrivateIPs(member.getMemberPrivateIPs());
            memberActivatedEvent.setDefaultPublicIP(member.getDefaultPublicIP());
            memberActivatedEvent.setMemberPublicIPs(member.getMemberPublicIPs());
            TopologyManager.updateTopology(topology);
            // Publish member activated event
            TopologyEventPublisher.sendMemberActivatedEvent(memberActivatedEvent);
            // Publish statistics data
            BAMUsageDataPublisher.publish(
                memberActivatedEvent.getMemberId(),
                memberActivatedEvent.getPartitionId(),
                memberActivatedEvent.getNetworkPartitionId(),
                memberActivatedEvent.getClusterId(),
                memberActivatedEvent.getServiceName(),
                MemberStatus.Active.toString(),
                null);
        }
    } finally {
        TopologyManager.releaseWriteLock();
    }
}
public static void handleMemberStarted(InstanceStartedEvent instanceStartedEvent) {
    try {
        Topology topology = TopologyManager.getTopology();
        Service service = topology.getService(instanceStartedEvent.getServiceName());
        if (service == null) {
            log.warn(String.format("Service %s does not exist", instanceStartedEvent.getServiceName()));
            return;
        }
        if (!service.clusterExists(instanceStartedEvent.getClusterId())) {
            log.warn(String.format(
                "Cluster %s does not exist in service %s",
                instanceStartedEvent.getClusterId(), instanceStartedEvent.getServiceName()));
            return;
        }
        Cluster cluster = service.getCluster(instanceStartedEvent.getClusterId());
        Member member = cluster.getMember(instanceStartedEvent.getMemberId());
        if (member == null) {
            log.warn(String.format("Member %s does not exist", instanceStartedEvent.getMemberId()));
            return;
        }
        try {
            TopologyManager.acquireWriteLock();
            // try update lifecycle state
            if (!member.isStateTransitionValid(MemberStatus.Starting)) {
                log.error("Invalid state transition from " + member.getStatus() + " to "
                    + MemberStatus.Starting);
                return;
            } else {
                member.setStatus(MemberStatus.Starting);
                log.info("Member status updated to Starting");
                TopologyManager.updateTopology(topology);
                // memberStartedEvent.
                TopologyEventPublisher.sendMemberStartedEvent(instanceStartedEvent);
                // publishing data
                BAMUsageDataPublisher.publish(
                    instanceStartedEvent.getMemberId(),
                    instanceStartedEvent.getPartitionId(),
                    instanceStartedEvent.getNetworkPartitionId(),
                    instanceStartedEvent.getClusterId(),
                    instanceStartedEvent.getServiceName(),
                    MemberStatus.Starting.toString(),
                    null);
            }
        } finally {
            TopologyManager.releaseWriteLock();
        }
    } catch (Exception e) {
        String message = String.format(
            "Could not handle member started event: [application-id] %s "
                + "[service-name] %s [member-id] %s",
            instanceStartedEvent.getApplicationId(),
            instanceStartedEvent.getServiceName(),
            instanceStartedEvent.getMemberId());
        log.warn(message, e);
    }
}
/**
 * Update member status to initialized and publish member initialized event.
 *
 * @param memberContext
 */
public static void handleMemberInitializedEvent(MemberContext memberContext) {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(memberContext.getCartridgeType());
    if (service == null) {
        log.warn(String.format("Service %s does not exist", memberContext.getCartridgeType()));
        return;
    }
    if (!service.clusterExists(memberContext.getClusterId())) {
        log.warn(String.format(
            "Cluster %s does not exist in service %s",
            memberContext.getClusterId(), memberContext.getCartridgeType()));
        return;
    }
    Member member =
        service.getCluster(memberContext.getClusterId()).getMember(memberContext.getMemberId());
    if (member == null) {
        log.warn(String.format("Member %s does not exist", memberContext.getMemberId()));
        return;
    }
    try {
        TopologyManager.acquireWriteLock();
        // Set ip addresses
        member.setDefaultPrivateIP(memberContext.getDefaultPrivateIP());
        if (memberContext.getPrivateIPs() != null) {
            member.setMemberPrivateIPs(Arrays.asList(memberContext.getPrivateIPs()));
        }
        member.setDefaultPublicIP(memberContext.getDefaultPublicIP());
        if (memberContext.getPublicIPs() != null) {
            member.setMemberPublicIPs(Arrays.asList(memberContext.getPublicIPs()));
        }
        // try update lifecycle state
        if (!member.isStateTransitionValid(MemberStatus.Initialized)) {
            log.error("Invalid state transition from " + member.getStatus() + " to "
                + MemberStatus.Initialized);
            return;
        } else {
            member.setStatus(MemberStatus.Initialized);
            log.info("Member status updated to initialized");
            TopologyManager.updateTopology(topology);
            TopologyEventPublisher.sendMemberInitializedEvent(memberContext);
            // publishing data
            BAMUsageDataPublisher.publish(
                memberContext.getMemberId(),
                memberContext.getPartition().getId(),
                memberContext.getNetworkPartitionId(),
                memberContext.getClusterId(),
                memberContext.getCartridgeType(),
                MemberStatus.Initialized.toString(),
                null);
        }
    } finally {
        TopologyManager.releaseWriteLock();
    }
}
@NotNull
public Class classToClass(@NotNull PsiClass psiClass) {
    Set<String> modifiers = modifiersListToModifiersSet(psiClass.getModifierList());
    List<Field> fields = fieldsToFieldList(psiClass.getFields(), psiClass);
    List<Element> typeParameters = elementsToElementList(psiClass.getTypeParameters());
    List<Type> implementsTypes = typesToNotNullableTypeList(psiClass.getImplementsListTypes());
    List<Type> extendsTypes = typesToNotNullableTypeList(psiClass.getExtendsListTypes());
    IdentifierImpl name = new IdentifierImpl(psiClass.getName());
    List<Expression> baseClassParams = new LinkedList<Expression>();
    List<Member> members = getMembers(psiClass);

    // we try to find super() call and generate class declaration like that:
    // class A(name: String, i: Int) : Base(name)
    SuperVisitor visitor = new SuperVisitor();
    psiClass.accept(visitor);
    Collection<PsiExpressionList> resolvedSuperCallParameters = visitor.getResolvedSuperCallParameters();
    if (resolvedSuperCallParameters.size() == 1) {
        baseClassParams.addAll(
            expressionsToExpressionList(
                resolvedSuperCallParameters.toArray(new PsiExpressionList[1])[0].getExpressions()));
    }

    // we create primary constructor from all non final fields and fields without initializers
    if (!psiClass.isEnum()
        && !psiClass.isInterface()
        && psiClass.getConstructors().length > 1
        && getPrimaryConstructorForThisCase(psiClass) == null) {
        List<Field> finalOrWithEmptyInitializer = getFinalOrWithEmptyInitializer(fields);
        Map<String, String> initializers = new HashMap<String, String>();
        for (Member m : members) {
            // and modify secondaries
            if (m.getKind() == INode.Kind.CONSTRUCTOR) {
                Function f = (Function) m;
                if (!((Constructor) f).isPrimary()) {
                    for (Field fo : finalOrWithEmptyInitializer) {
                        String init = getDefaultInitializer(fo);
                        initializers.put(fo.getIdentifier().toKotlin(), init);
                    }
                    List<Statement> newStatements = new LinkedList<Statement>();
                    for (Statement s : f.getBlock().getStatements()) {
                        boolean isRemoved = false;
                        if (s.getKind() == INode.Kind.ASSIGNMENT_EXPRESSION) {
                            AssignmentExpression assignmentExpression = (AssignmentExpression) s;
                            if (assignmentExpression.getLeft().getKind() == INode.Kind.CALL_CHAIN) {
                                for (Field fo : finalOrWithEmptyInitializer) {
                                    String id = fo.getIdentifier().toKotlin();
                                    if (((CallChainExpression) assignmentExpression.getLeft())
                                            .getIdentifier()
                                            .toKotlin()
                                            .endsWith("." + id)) {
                                        initializers.put(id, assignmentExpression.getRight().toKotlin());
                                        isRemoved = true;
                                    }
                                }
                            }
                        }
                        if (!isRemoved) {
                            newStatements.add(s);
                        }
                    }
                    newStatements.add(
                        0,
                        new DummyStringExpression(
                            "val __ = "
                                + createPrimaryConstructorInvocation(
                                    name.toKotlin(), finalOrWithEmptyInitializer, initializers)));
                    f.setBlock(new Block(newStatements));
                }
            }
        }
        members.add(
            new Constructor(
                Identifier.EMPTY_IDENTIFIER,
                Collections.<String>emptySet(),
                new ClassType(name),
                Collections.<Element>emptyList(),
                new ParameterList(createParametersFromFields(finalOrWithEmptyInitializer)),
                new Block(createInitStatementsFromFields(finalOrWithEmptyInitializer)),
                true));
    }

    if (psiClass.isInterface()) {
        return new Trait(
            this, name, modifiers, typeParameters, extendsTypes,
            Collections.<Expression>emptyList(), implementsTypes, members);
    }
    if (psiClass.isEnum()) {
        return new Enum(
            this, name, modifiers, typeParameters, Collections.<Type>emptyList(),
            Collections.<Expression>emptyList(), implementsTypes, members);
    }
    return new Class(
        this, name, modifiers, typeParameters, extendsTypes,
        baseClassParams, implementsTypes, members);
}
public FlushResult call() throws Exception {
    // For each measure and each star, ask the index
    // which headers intersect.
    final List<SegmentHeader> headers = new ArrayList<SegmentHeader>();
    final List<Member> measures = CacheControlImpl.findMeasures(region);
    final SegmentColumn[] flushRegion = CacheControlImpl.findAxisValues(region);
    final List<RolapStar> starList = CacheControlImpl.getStarList(region);

    for (Member member : measures) {
        if (!(member instanceof RolapStoredMeasure)) {
            continue;
        }
        final RolapStoredMeasure storedMeasure = (RolapStoredMeasure) member;
        final RolapStar star = storedMeasure.getCube().getStar();
        final SegmentCacheIndex index = cacheMgr.indexRegistry.getIndex(star);
        headers.addAll(
            index.intersectRegion(
                member.getDimension().getSchema().getName(),
                ((RolapSchema) member.getDimension().getSchema()).getChecksum(),
                storedMeasure.getCube().getName(),
                storedMeasure.getName(),
                storedMeasure.getCube().getStar().getFactTable().getAlias(),
                flushRegion));
    }

    // If flushRegion is empty, this means we must clear all
    // segments for the region's measures.
    if (flushRegion.length == 0) {
        for (final SegmentHeader header : headers) {
            for (RolapStar star : starList) {
                cacheMgr.indexRegistry.getIndex(star).remove(header);
            }
            // Remove the segment from external caches. Use an
            // executor, because it may take some time. We discard
            // the future, because we don't care too much if it fails.
            cacheControlImpl.trace(
                "discard segment - it cannot be constrained and maintain consistency:\n"
                    + header.getDescription());
            final Future<?> task =
                cacheMgr.cacheExecutor.submit(
                    new Runnable() {
                        public void run() {
                            try {
                                // Note that the SegmentCache API doesn't
                                // require us to verify that the segment
                                // exists (by calling "contains") before we
                                // call "remove".
                                cacheMgr.compositeCache.remove(header);
                            } catch (Throwable e) {
                                LOGGER.warn("remove header failed: " + header, e);
                            }
                        }
                    });
            Util.safeGet(task, "SegmentCacheManager.flush");
        }
        return new FlushResult(Collections.<Callable<Boolean>>emptyList());
    }

    // Now we know which headers intersect. For each of them,
    // we append an excluded region.
    //
    // TODO: Optimize the logic here. If a segment is mostly
    // empty, we should trash it completely.
    final List<Callable<Boolean>> callableList = new ArrayList<Callable<Boolean>>();
    for (final SegmentHeader header : headers) {
        if (!header.canConstrain(flushRegion)) {
            // We have to delete that segment altogether.
            cacheControlImpl.trace(
                "discard segment - it cannot be constrained and maintain consistency:\n"
                    + header.getDescription());
            for (RolapStar star : starList) {
                cacheMgr.indexRegistry.getIndex(star).remove(header);
            }
            continue;
        }
        final SegmentHeader newHeader = header.constrain(flushRegion);
        for (final SegmentCacheWorker worker : cacheMgr.segmentCacheWorkers) {
            callableList.add(
                new Callable<Boolean>() {
                    public Boolean call() throws Exception {
                        boolean existed;
                        if (worker.supportsRichIndex()) {
                            final SegmentBody sb = worker.get(header);
                            existed = worker.remove(header);
                            if (sb != null) {
                                worker.put(newHeader, sb);
                            }
                        } else {
                            // The cache doesn't support rich index. We
                            // have to clear the segment entirely.
                            existed = worker.remove(header);
                        }
                        return existed;
                    }
                });
        }
        for (RolapStar star : starList) {
            SegmentCacheIndex index = cacheMgr.indexRegistry.getIndex(star);
            index.remove(header);
            index.add(newHeader, false, null);
        }
    }

    // Done
    return new FlushResult(callableList);
}
public static void handleMemberReadyToShutdown(InstanceReadyToShutdownEvent instanceReadyToShutdownEvent)
        throws InvalidMemberException, InvalidCartridgeTypeException {
    Topology topology = TopologyManager.getTopology();
    Service service = topology.getService(instanceReadyToShutdownEvent.getServiceName());
    // update the status of the member
    if (service == null) {
        log.warn(String.format(
            "Service %s does not exist", instanceReadyToShutdownEvent.getServiceName()));
        return;
    }
    Cluster cluster = service.getCluster(instanceReadyToShutdownEvent.getClusterId());
    if (cluster == null) {
        log.warn(String.format(
            "Cluster %s does not exist", instanceReadyToShutdownEvent.getClusterId()));
        return;
    }
    Member member = cluster.getMember(instanceReadyToShutdownEvent.getMemberId());
    if (member == null) {
        log.warn(String.format(
            "Member %s does not exist", instanceReadyToShutdownEvent.getMemberId()));
        return;
    }
    MemberReadyToShutdownEvent memberReadyToShutdownEvent =
        new MemberReadyToShutdownEvent(
            instanceReadyToShutdownEvent.getServiceName(),
            instanceReadyToShutdownEvent.getClusterId(),
            instanceReadyToShutdownEvent.getClusterInstanceId(),
            instanceReadyToShutdownEvent.getMemberId(),
            instanceReadyToShutdownEvent.getNetworkPartitionId(),
            instanceReadyToShutdownEvent.getPartitionId());
    try {
        TopologyManager.acquireWriteLock();
        if (!member.isStateTransitionValid(MemberStatus.ReadyToShutDown)) {
            log.error("Invalid state transition from " + member.getStatus() + " to "
                + MemberStatus.ReadyToShutDown);
            return;
        }
        member.setStatus(MemberStatus.ReadyToShutDown);
        log.info("Member status updated to ReadyToShutDown");
        TopologyManager.updateTopology(topology);
    } finally {
        TopologyManager.releaseWriteLock();
    }
    TopologyEventPublisher.sendMemberReadyToShutdownEvent(memberReadyToShutdownEvent);
    // publishing data
    BAMUsageDataPublisher.publish(
        instanceReadyToShutdownEvent.getMemberId(),
        instanceReadyToShutdownEvent.getPartitionId(),
        instanceReadyToShutdownEvent.getNetworkPartitionId(),
        instanceReadyToShutdownEvent.getClusterId(),
        instanceReadyToShutdownEvent.getServiceName(),
        MemberStatus.ReadyToShutDown.toString(),
        null);
    // termination of particular instance will be handled by autoscaler
}
private void buildForProject(
    JavaProject project,
    ArrayList potentialSubtypes,
    org.eclipse.jdt.core.ICompilationUnit[] workingCopies,
    HashSet localTypes,
    IProgressMonitor monitor)
    throws JavaModelException {
    // resolve
    int openablesLength = potentialSubtypes.size();
    if (openablesLength > 0) {
        // copy vectors into arrays
        Openable[] openables = new Openable[openablesLength];
        potentialSubtypes.toArray(openables);

        // sort in the order of roots and in reverse alphabetical order for .class file
        // since requesting top level types in the process of caching an enclosing type is
        // not supported by the lookup environment
        IPackageFragmentRoot[] roots = project.getPackageFragmentRoots();
        int rootsLength = roots.length;
        final HashtableOfObjectToInt indexes = new HashtableOfObjectToInt(openablesLength);
        for (int i = 0; i < openablesLength; i++) {
            IJavaElement root = openables[i].getAncestor(IJavaElement.PACKAGE_FRAGMENT_ROOT);
            int index;
            for (index = 0; index < rootsLength; index++) {
                if (roots[index].equals(root)) break;
            }
            indexes.put(openables[i], index);
        }
        Arrays.sort(
            openables,
            new Comparator() {
                public int compare(Object a, Object b) {
                    int aIndex = indexes.get(a);
                    int bIndex = indexes.get(b);
                    if (aIndex != bIndex) return aIndex - bIndex;
                    return ((Openable) b).getElementName().compareTo(((Openable) a).getElementName());
                }
            });

        IType focusType = getType();
        boolean inProjectOfFocusType = focusType != null && focusType.getJavaProject().equals(project);
        org.eclipse.jdt.core.ICompilationUnit[] unitsToLookInside = null;
        if (inProjectOfFocusType) {
            org.eclipse.jdt.core.ICompilationUnit unitToLookInside = focusType.getCompilationUnit();
            if (unitToLookInside != null) {
                int wcLength = workingCopies == null ? 0 : workingCopies.length;
                if (wcLength == 0) {
                    unitsToLookInside = new org.eclipse.jdt.core.ICompilationUnit[] {unitToLookInside};
                } else {
                    unitsToLookInside = new org.eclipse.jdt.core.ICompilationUnit[wcLength + 1];
                    unitsToLookInside[0] = unitToLookInside;
                    System.arraycopy(workingCopies, 0, unitsToLookInside, 1, wcLength);
                }
            } else {
                unitsToLookInside = workingCopies;
            }
        }
        SearchableEnvironment searchableEnvironment =
            project.newSearchableNameEnvironment(unitsToLookInside);
        this.nameLookup = searchableEnvironment.nameLookup;
        Map options = project.getOptions(true);
        // disable task tags to speed up parsing
        options.put(JavaCore.COMPILER_TASK_TAGS, ""); // $NON-NLS-1$
        this.hierarchyResolver =
            new HierarchyResolver(searchableEnvironment, options, this, new DefaultProblemFactory());

        if (focusType != null) {
            Member declaringMember = ((Member) focusType).getOuterMostLocalContext();
            if (declaringMember == null) {
                // top level or member type
                if (!inProjectOfFocusType) {
                    char[] typeQualifiedName = focusType.getTypeQualifiedName('.').toCharArray();
                    String[] packageName = ((PackageFragment) focusType.getPackageFragment()).names;
                    if (searchableEnvironment.findType(typeQualifiedName, Util.toCharArrays(packageName))
                        == null) {
                        // focus type is not visible in this project: no need to go further
                        return;
                    }
                }
            } else {
                // local or anonymous type
                Openable openable;
                if (declaringMember.isBinary()) {
                    openable = (Openable) declaringMember.getClassFile();
                } else {
                    openable = (Openable) declaringMember.getCompilationUnit();
                }
                localTypes = new HashSet();
                localTypes.add(openable.getPath().toString());
                this.hierarchyResolver.resolve(new Openable[] {openable}, localTypes, monitor);
                return;
            }
        }
        this.hierarchyResolver.resolve(openables, localTypes, monitor);
    }
}
/** @since 1.6 */
public static boolean isConcrete(Member member) {
    int mod = member.getModifiers();
    return (mod & (Modifier.INTERFACE | Modifier.ABSTRACT)) == 0;
}
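/*
 * A minimal usage sketch for isConcrete(Member): filtering abstract members out of the
 * methods declared on a class. The demo class is an illustrative assumption; it carries
 * a local copy of the helper above so it compiles on its own.
 */
import java.lang.reflect.Member;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

public class ConcreteFilterDemo {
    // Local copy of the isConcrete helper above.
    static boolean isConcrete(Member member) {
        int mod = member.getModifiers();
        return (mod & (Modifier.INTERFACE | Modifier.ABSTRACT)) == 0;
    }

    public static void main(String[] args) {
        // java.util.AbstractList declares both abstract and concrete methods.
        for (Method m : java.util.AbstractList.class.getDeclaredMethods()) {
            if (isConcrete(m)) {
                System.out.println("concrete: " + m.getName());
            }
        }
    }
}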