@Override
public String getRpcAddress() {
    String sensorName = getConfig(RPC_ADDRESS_SENSOR);
    if (Strings.isNonBlank(sensorName)) {
        return Entities.submit(this, DependentConfiguration.attributeWhenReady(this,
                Sensors.newStringSensor(sensorName))).getUnchecked();
    }
    return "0.0.0.0";
}
@Override
public String getBroadcastAddress() {
    String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
    if (Strings.isNonBlank(sensorName)) {
        return Entities.submit(this, DependentConfiguration.attributeWhenReady(this,
                Sensors.newStringSensor(sensorName))).getUnchecked();
    }

    String snitchName = getConfig(CassandraNode.ENDPOINT_SNITCH_NAME);
    if (snitchName.equals("Ec2MultiRegionSnitch") || snitchName.contains("MultiCloudSnitch")) {
        // http://www.datastax.com/documentation/cassandra/2.0/mobile/cassandra/architecture/architectureSnitchEC2MultiRegion_c.html
        // describes that the listen_address is set to the private IP, and the broadcast_address
        // is set to the public IP.
        return getAttribute(CassandraNode.ADDRESS);
    } else if (!getDriver().isClustered()) {
        return getListenAddress();
    } else {
        // In other situations, prefer the hostname, so other regions can see it,
        // *unless* hostname resolves at the target to a local-only interface which is
        // different to ADDRESS (workaround for issue deploying to localhost).
        String hostname = getAttribute(CassandraNode.HOSTNAME);
        try {
            String resolvedAddress = getDriver().getResolvedAddress(hostname);
            if (resolvedAddress == null) {
                log.debug("Cassandra using broadcast address " + getListenAddress() + " for " + this
                        + " because hostname " + hostname + " could not be resolved at remote machine");
                return getListenAddress();
            }
            if (resolvedAddress.equals("127.0.0.1")) {
                log.debug("Cassandra using broadcast address " + getListenAddress() + " for " + this
                        + " because hostname " + hostname + " resolves to 127.0.0.1");
                return getListenAddress();
            }
            return hostname;
        } catch (Exception e) {
            Exceptions.propagateIfFatal(e);
            log.warn("Error resolving hostname " + hostname + " for " + this + ": " + e, e);
            return hostname;
        }
    }
}
@Override
public String getListenAddress() {
    String sensorName = getConfig(LISTEN_ADDRESS_SENSOR);
    if (Strings.isNonBlank(sensorName)) {
        return Entities.submit(this, DependentConfiguration.attributeWhenReady(this,
                Sensors.newStringSensor(sensorName))).getUnchecked();
    }

    String subnetAddress = getAttribute(CassandraNode.SUBNET_ADDRESS);
    return Strings.isNonBlank(subnetAddress) ? subnetAddress : getAttribute(CassandraNode.ADDRESS);
}
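// Note on the three address getters above: each can be overridden by pointing the corresponding
// *_ADDRESS_SENSOR config key at the name of a sensor on this entity.
// DependentConfiguration.attributeWhenReady returns a Task that completes only once that sensor
// has been published, and Entities.submit(...).getUnchecked() blocks on it, so the getter will
// not return until some other entity, policy or enricher sets the sensor. A minimal sketch of
// that hand-off (the sensor name "example.listenAddress" and the value are illustrative only,
// not part of this class):
//
//     AttributeSensor<String> addr = Sensors.newStringSensor("example.listenAddress");
//     cassandraNode.config().set(CassandraNode.LISTEN_ADDRESS_SENSOR, "example.listenAddress");
//     // ... later, once the address is known ...
//     cassandraNode.sensors().set(addr, "10.0.0.5");   // unblocks getListenAddress()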
protected void doStop(ConfigBag parameters, Callable<StopMachineDetails<Integer>> stopTask) {
    preStopConfirmCustom();

    log.info("Stopping {} in {}", entity(), entity().getLocations());

    StopMode stopMachineMode = getStopMachineMode(parameters);
    StopMode stopProcessMode = parameters.get(StopSoftwareParameters.STOP_PROCESS_MODE);

    DynamicTasks.queue("pre-stop", new PreStopCustomTask());

    // BROOKLYN-263:
    // With this change the stop effector will wait for the location to finish provisioning, so
    // that it can terminate the machine, if a provisioning request is in-progress.
    //
    // The ProvisionMachineTask stores transient internal state in PROVISIONING_TASK_STATE and
    // PROVISIONED_MACHINE: it records when the provisioning is running and when done; and it
    // records the final machine. We record the machine in the internal sensor (rather than
    // just relying on getLocations) because the latter is set much later in the start()
    // process.
    //
    // This code is a big improvement (previously there was a several-minute window in some
    // clouds where a call to stop() would leave the machine running).
    //
    // However, there are still races. If the start() code has not yet reached the call to
    // location.obtain() then we won't wait, and the start() call won't know to abort. It's
    // fiddly to get that right, because we need to cope with restart() - so we mustn't leave
    // any state behind that will interfere with subsequent sequential calls to start().
    // There is some attempt to handle it by ProvisionMachineTask checking if the expectedState
    // is stopping/stopped.
    Maybe<MachineLocation> machine = Machines.findUniqueMachineLocation(entity().getLocations());
    ProvisioningTaskState provisioningState =
            entity().sensors().get(AttributesInternal.INTERNAL_PROVISIONING_TASK_STATE);

    if (machine.isAbsent() && provisioningState == ProvisioningTaskState.RUNNING) {
        Duration maxWait = entity().config().get(STOP_WAIT_PROVISIONING_TIMEOUT);
        log.info("When stopping {}, waiting for up to {} for the machine to finish provisioning, before terminating it",
                entity(), maxWait);
        boolean success = Repeater.create("Wait for a machine to appear")
                .until(new Callable<Boolean>() {
                    @Override
                    public Boolean call() throws Exception {
                        ProvisioningTaskState state =
                                entity().sensors().get(AttributesInternal.INTERNAL_PROVISIONING_TASK_STATE);
                        return (state != ProvisioningTaskState.RUNNING);
                    }})
                .backoffTo(Duration.FIVE_SECONDS)
                .limitTimeTo(maxWait)
                .run();
        if (!success) {
            log.warn("When stopping {}, timed out after {} waiting for the machine to finish provisioning - machine may be left running",
                    entity(), maxWait);
        }
        machine = Maybe.ofDisallowingNull(entity().sensors().get(INTERNAL_PROVISIONED_MACHINE));
    }
    entity().sensors().remove(AttributesInternal.INTERNAL_PROVISIONING_TASK_STATE);
    entity().sensors().remove(INTERNAL_PROVISIONED_MACHINE);

    Task<List<?>> stoppingProcess = null;
    if (canStop(stopProcessMode, entity())) {
        stoppingProcess = Tasks.parallel("stopping",
                Tasks.create("stopping (process)", new StopProcessesAtMachineTask()),
                Tasks.create("stopping (feeds)", new StopFeedsAtMachineTask()));
        DynamicTasks.queue(stoppingProcess);
    }

    Task<StopMachineDetails<Integer>> stoppingMachine = null;
    if (canStop(stopMachineMode, machine.isAbsent())) {
        // Release this machine (even if error trying to stop process - we rethrow that after)
        Map<String, Object> stopMachineFlags = MutableMap.of();
        if (Entitlements.getEntitlementContext() != null) {
            stopMachineFlags.put("tags", MutableSet.of(
                    BrooklynTaskTags.tagForEntitlement(Entitlements.getEntitlementContext())));
        }
        Task<StopMachineDetails<Integer>> stopMachineTask = Tasks.<StopMachineDetails<Integer>>builder()
                .displayName("stopping (machine)")
                .body(stopTask)
                .flags(stopMachineFlags)
                .build();
        stoppingMachine = DynamicTasks.queue(stopMachineTask);

        DynamicTasks.drain(entity().getConfig(STOP_PROCESS_TIMEOUT), false);

        // Shut down the machine if stopping the process fails or takes too long.
        synchronized (stoppingMachine) {
            // task also used as mutex by DST when it submits it; ensure it only submits once!
            if (!stoppingMachine.isSubmitted()) {
                // force the stoppingMachine task to run by submitting it here
                StringBuilder msg = new StringBuilder("Submitting machine stop early in background for ")
                        .append(entity());
                if (stoppingProcess == null) {
                    msg.append(". Process stop skipped, pre-stop not finished?");
                } else {
                    msg.append(" because process stop has ")
                            .append(stoppingProcess.isDone() ? "finished abnormally" : "not finished");
                }
                log.warn(msg.toString());
                Entities.submit(entity(), stoppingMachine);
            }
        }
    }

    try {
        // This maintains previous behaviour of silently squashing any errors on the
        // stoppingProcess task if the stoppingMachine exits with a nonzero value
        boolean checkStopProcesses = (stoppingProcess != null
                && (stoppingMachine == null || stoppingMachine.get().value == 0));

        if (checkStopProcesses) {
            // TODO we should test for destruction above, not merely successful "stop", as things
            // like localhost and ssh won't be destroyed
            DynamicTasks.waitForLast();
            if (machine.isPresent()) {
                // throw early errors *only if* there is a machine and we have not destroyed it
                stoppingProcess.get();
            }
        }
    } catch (Throwable e) {
        ServiceStateLogic.setExpectedState(entity(), Lifecycle.ON_FIRE);
        Exceptions.propagate(e);
    }

    entity().sensors().set(SoftwareProcess.SERVICE_UP, false);
    ServiceStateLogic.setExpectedState(entity(), Lifecycle.STOPPED);

    DynamicTasks.queue("post-stop", new PostStopCustomTask());

    if (log.isDebugEnabled()) log.debug("Stopped software process entity " + entity());
}
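// For context on the wait-loop in doStop() above: it only knows a provisioning request is still
// in-flight because the start()-side provisioning task publishes its progress through the two
// internal sensors that are read (and then removed) there. A minimal sketch of that protocol,
// assuming the provisioning task marks itself RUNNING before location.obtain() and records the
// machine once obtained; this is illustrative only, not the actual ProvisionMachineTask
// implementation, and the non-RUNNING end state is shown here as a hypothetical DONE value:
//
//     entity().sensors().set(AttributesInternal.INTERNAL_PROVISIONING_TASK_STATE,
//             ProvisioningTaskState.RUNNING);
//     try {
//         MachineLocation machine = location.obtain(flags);
//         entity().sensors().set(INTERNAL_PROVISIONED_MACHINE, machine);
//     } finally {
//         // leave the RUNNING state once finished, so the Repeater in doStop() unblocks
//         entity().sensors().set(AttributesInternal.INTERNAL_PROVISIONING_TASK_STATE,
//                 ProvisioningTaskState.DONE);
//     }
//
// With that in place, the Repeater exits as soon as the state is no longer RUNNING, and
// INTERNAL_PROVISIONED_MACHINE supplies the machine for the stop task to terminate.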