// Unregisters the VM described by vmSpec from VirtualBox and deletes the attached media
// that are flagged for auto-deletion (plus their children). Returns null (Void function).
@Override
public Void apply(IMachine machine) {
  List<IMedium> mediaToBeDeleted = ImmutableList.of();
  try {
    // unregister returns the media that were attached to the machine
    mediaToBeDeleted = machine.unregister(vmSpec.getCleanupMode());
  } catch (VBoxException e) {
    ErrorCode errorCode = ErrorCode.valueOf(e);
    switch (errorCode) {
      case VBOX_E_OBJECT_NOT_FOUND:
        // machine already gone: nothing to unregister, continue with the empty media list
        logger.debug("Machine %s does not exists, cannot unregister", vmSpec.getVmName());
        break;
      default:
        // any other VirtualBox error is unexpected; rethrow
        throw e;
    }
  }
  // keep only the hard disks the spec marks auto-delete, mapping each medium through
  // DeleteChildrenOfMedium before collecting
  List<IMedium> filteredMediaToBeDeleted =
      Lists.newArrayList(
          transform(
              filter(mediaToBeDeleted, new AutoDeleteHardDiskPredicate(vmSpec)),
              new DeleteChildrenOfMedium()));
  if (!filteredMediaToBeDeleted.isEmpty()) {
    try {
      IProgress deletion = machine.delete(filteredMediaToBeDeleted);
      // -1 == wait indefinitely for the deletion to complete
      deletion.waitForCompletion(-1);
    } catch (Exception e) {
      logger.error(e, "Problem in deleting the media attached to %s", machine.getName());
      Throwables.propagate(e);
    }
  }
  return null;
}
/**
 * Creates a directory-marker blob on the local filesystem.
 *
 * <p>The blob must carry no content; its key is materialized as a real directory, and the
 * directory-marker MD5 plus common metadata are stored as user-defined extended attributes
 * where the filesystem supports them (best effort otherwise).
 *
 * @return the lower-case hex encoding of the directory-marker MD5
 * @throws IllegalArgumentException if the blob declares a non-zero content length
 * @throws IOException if the directory cannot be created
 */
private String putDirectoryBlob(final String containerName, final Blob blob) throws IOException {
  String blobKey = blob.getMetadata().getName();
  ContentMetadata metadata = blob.getMetadata().getContentMetadata();
  Long contentLength = metadata.getContentLength();
  if (contentLength != null && contentLength != 0) {
    throw new IllegalArgumentException("Directory blob cannot have content: " + blobKey);
  }
  File outputFile = getFileForBlobKey(containerName, blobKey);
  Path outputPath = outputFile.toPath();
  if (!outputFile.isDirectory() && !outputFile.mkdirs()) {
    throw new IOException("Unable to mkdir: " + outputPath);
  }
  UserDefinedFileAttributeView view = getUserDefinedFileAttributeView(outputPath);
  if (view != null) {
    try {
      view.write(XATTR_CONTENT_MD5, ByteBuffer.wrap(DIRECTORY_MD5));
      writeCommonMetadataAttr(view, blob);
    } catch (IOException e) {
      // best effort: some filesystems reject xattr writes even when the view exists
      logger.debug("xattrs not supported on %s", outputPath);
    }
  } else {
    logger.warn("xattr not supported on %s", blobKey);
  }
  return base16().lowerCase().encode(DIRECTORY_MD5);
}
@Override public Server apply(@Nullable String baseResource) { if (!jetty.getState().equals(Server.STARTED) // TODO code smell = hard coding addresses or ports!! && !new InetSocketAddressConnect().apply(new IPSocket("localhost", port))) { ResourceHandler resource_handler = new ResourceHandler(); resource_handler.setDirectoriesListed(true); resource_handler.setWelcomeFiles(new String[] {"index.html"}); resource_handler.setResourceBase(baseResource); logger.info("serving " + resource_handler.getBaseResource()); HandlerList handlers = new HandlerList(); handlers.setHandlers(new Handler[] {resource_handler, new DefaultHandler()}); jetty.setHandler(handlers); try { jetty.start(); } catch (Exception e) { logger.error(e, "Server jetty could not be started at this %s", baseResource); } return jetty; } else { logger.debug("Server jetty serving %s already running. Skipping start", baseResource); return jetty; } }
/**
 * Connects over ssh, runs the configured statement on the node, and publishes
 * submission / failure / completion events on the event bus. The ssh session is
 * always disconnected on exit.
 */
@Override
public ExecResponse call() {
  checkState(ssh != null, "please call init() before invoking call");
  try {
    ssh.connect();
    ExecResponse returnVal;
    eventBus.post(new StatementOnNodeSubmission(statement, node));
    // render the statement for UNIX and optionally wrap it to execute as root
    String command =
        runAsRoot
            ? execAsRoot(statement.render(OsFamily.UNIX))
            : execScriptAsDefaultUser(statement.render(OsFamily.UNIX));
    try {
      returnVal = runCommand(command);
    } catch (Throwable e) {
      // surface the failure to listeners before propagating
      eventBus.post(new StatementOnNodeFailure(statement, node, e));
      throw Throwables.propagate(e);
    }
    eventBus.post(new StatementOnNodeCompletion(statement, node, returnVal));
    // full response at trace, exit status only at debug
    if (logger.isTraceEnabled()) logger.trace("<< %s[%s]", statement, returnVal);
    else logger.debug("<< %s(%d)", statement, returnVal.getExitStatus());
    return returnVal;
  } finally {
    if (ssh != null) ssh.disconnect();
  }
}
private void blockUntilRunningAndAssignElasticIpsToInstancesOrPutIntoBadMap( Set<RunningInstance> input, Map<NodeMetadata, Exception> badNodes) { Map<RegionAndName, RunningInstance> instancesById = Maps.uniqueIndex(input, instanceToRegionAndName); for (Map.Entry<RegionAndName, RunningInstance> entry : instancesById.entrySet()) { RegionAndName id = entry.getKey(); RunningInstance instance = entry.getValue(); try { logger.debug("<< allocating elastic IP instance(%s)", id); String ip = client.getElasticIPAddressServices().allocateAddressInRegion(id.getRegion()); // block until instance is running logger.debug(">> awaiting status running instance(%s)", id); AtomicReference<NodeMetadata> node = newReference(runningInstanceToNodeMetadata.apply(instance)); nodeRunning.apply(node); logger.trace("<< running instance(%s)", id); logger.debug(">> associating elastic IP %s to instance %s", ip, id); client .getElasticIPAddressServices() .associateAddressInRegion(id.getRegion(), ip, id.getName()); logger.trace("<< associated elastic IP %s to instance %s", ip, id); // add mapping of instance to ip into the cache elasticIpCache.put(id, ip); } catch (RuntimeException e) { badNodes.put(runningInstanceToNodeMetadata.apply(instancesById.get(id)), e); } } }
@Override public void removeBlob(final String container, final String blobKey) { filesystemContainerNameValidator.validate(container); filesystemBlobKeyValidator.validate(blobKey); String fileName = buildPathStartingFromBaseDir(container, blobKey); logger.debug("Deleting blob %s", fileName); File fileToBeDeleted = new File(fileName); if (fileToBeDeleted.isDirectory()) { try { UserDefinedFileAttributeView view = getUserDefinedFileAttributeView(fileToBeDeleted.toPath()); if (view != null) { for (String s : view.list()) { view.delete(s); } } } catch (IOException e) { logger.debug("Could not delete attributes from %s: %s", fileToBeDeleted, e); } } try { delete(fileToBeDeleted); } catch (IOException e) { logger.debug("Could not delete %s: %s", fileToBeDeleted, e); } // now examine if the key of the blob is a complex key (with a directory structure) // and eventually remove empty directory removeDirectoriesTreeOfBlobKey(container, blobKey); }
/** Lists all load balancers via the configured strategy, preserving iteration order. */
public Set<? extends LoadBalancerMetadata> listLoadBalancers() {
  logger.debug(">> listing load balancers");
  LinkedHashSet<LoadBalancerMetadata> loadBalancers =
      newLinkedHashSet(listLoadBalancersStrategy.listLoadBalancers());
  logger.debug("<< list(%d)", loadBalancers.size());
  return loadBalancers;
}
/** {@inheritDoc} */
@Override
public void destroyLoadBalancer(final String id) {
  checkNotNull(id, "id");
  logger.debug(">> destroying load balancer(%s)", id);
  final AtomicReference<LoadBalancerMetadata> loadBalancer = Atomics.newReference();
  // retry destroy for up to 3000ms with 1000ms between attempts while the
  // strategy keeps throwing IllegalStateException
  Predicate<String> tester =
      retry(
          new Predicate<String>() {
            public boolean apply(String input) {
              try {
                LoadBalancerMetadata md = destroyLoadBalancerStrategy.destroyLoadBalancer(id);
                if (md != null) loadBalancer.set(md);
                return true;
              } catch (IllegalStateException e) {
                logger.warn("<< illegal state destroying load balancer(%s)", id);
                return false;
              }
            }
          },
          3000,
          1000,
          MILLISECONDS); // TODO make timeouts class like ComputeServiceconstants
  // NOTE(review): success is declared only when the strategy returned no metadata
  // (loadBalancer.get() == null); confirm this is intended — the TODO below suggests
  // the terminal-state check was never finished
  boolean successful = tester.apply(id) && loadBalancer.get() == null; // TODO add load
  // balancerTerminated
  // retryable predicate
  // (load balancer.get() == null ||
  // load balancerTerminated.apply(load balancer.get()));
  logger.debug("<< destroyed load balancer(%s) success(%s)", id, successful);
}
/**
 * Builds a {@link Hardware} profile from the first virtual hardware section of the
 * vApp template's ovf envelope. Returns null when the descriptor is not uploaded, the
 * envelope is missing, or it contains no hardware sections.
 */
@Override
public Hardware apply(VAppTemplate from) {
  checkNotNull(from, "VAppTemplate");
  if (!from.isOvfDescriptorUploaded()) {
    logger.warn("cannot parse hardware as ovf descriptor for %s is not uploaded", from);
    return null;
  }
  Envelope ovf = client.getOvfEnvelopeForVAppTemplate(from.getHref());
  if (ovf == null) {
    logger.warn("cannot parse hardware as no ovf envelope found for %s", from);
    return null;
  }
  if (ovf.getVirtualSystem().getVirtualHardwareSections().isEmpty()) {
    // fix: the format string has two %s placeholders but only ovf was passed,
    // which threw MissingFormatArgumentException whenever this warning fired
    logger.warn("cannot parse hardware for %s as no hardware sections exist in ovf %s", from, ovf);
    return null;
  }
  if (ovf.getVirtualSystem().getVirtualHardwareSections().size() > 1) {
    logger.warn("multiple hardware choices found. using first", ovf);
  }
  VirtualHardwareSection hardware =
      Iterables.get(ovf.getVirtualSystem().getVirtualHardwareSections(), 0);
  HardwareBuilder builder = rasdToHardwareBuilder.apply(hardware.getItems());
  builder.location(findLocationForResource.apply(checkNotNull(parent, "parent")));
  builder
      .ids(from.getHref().toASCIIString())
      .name(from.getName())
      .supportsImage(ImagePredicates.idEquals(from.getHref().toASCIIString()));
  return builder.build();
}
/** Logs the response status line and every header at debug level, each line prefixed. */
public static void logResponse(Logger logger, HttpResponse response, String prefix) {
  if (!logger.isDebugEnabled()) {
    return;
  }
  logger.debug("%s %s", prefix, response.getStatusLine().toString());
  for (Entry<String, String> header : response.getHeaders().entries()) {
    logger.debug("%s %s: %s", prefix, header.getKey(), header.getValue());
  }
}
/**
 * Renders and runs the given action on the node, as root when configured, logging the
 * full response at trace level or just the exit code at debug.
 */
protected ExecResponse runAction(String action) {
  String command;
  if (runAsRoot) {
    command = execScriptAsRoot(action);
  } else {
    command = execScriptAsDefaultUser(action);
  }
  ExecResponse returnVal = runCommand(command);
  if (logger.isTraceEnabled()) {
    logger.trace("<< %s[%s]", action, returnVal);
  } else {
    logger.debug("<< %s(%d)", action, returnVal.getExitCode());
  }
  return returnVal;
}
/**
 * Blocks until every future in {@code responses} completes (or {@code maxTime} ms elapse)
 * and returns a map of the inputs whose futures failed, keyed to the exception raised.
 *
 * @param responses futures to await, keyed by the input that produced them
 * @param exec executor the completion listeners run on
 * @param maxTime maximum wait in milliseconds, or null to wait indefinitely
 * @param logger sink for progress/error messages
 * @param logPrefix prefix used in log messages
 * @return map from input to the exception its future threw; empty when all succeeded
 */
public static <T> Map<T, Exception> awaitCompletion(
    Map<T, ? extends Future<?>> responses,
    ExecutorService exec,
    @Nullable Long maxTime,
    final Logger logger,
    final String logPrefix) {
  if (responses.isEmpty()) return ImmutableMap.of();
  final int total = responses.size();
  final CountDownLatch doneSignal = new CountDownLatch(total);
  final AtomicInteger complete = new AtomicInteger(0);
  final AtomicInteger errors = new AtomicInteger(0);
  final long start = System.currentTimeMillis();
  // fix: listeners run on executor threads, so the error map must be thread-safe;
  // a plain HashMap could corrupt or lose entries under concurrent puts
  final Map<T, Exception> errorMap = new java.util.concurrent.ConcurrentHashMap<T, Exception>();
  for (final java.util.Map.Entry<T, ? extends Future<?>> future : responses.entrySet()) {
    Futures.makeListenable(future.getValue(), exec)
        .addListener(
            new Runnable() {
              @Override
              public void run() {
                try {
                  future.getValue().get();
                  complete.incrementAndGet();
                } catch (Exception e) {
                  errors.incrementAndGet();
                  logException(logger, logPrefix, total, complete.get(), errors.get(), start, e);
                  errorMap.put(future.getKey(), e);
                }
                doneSignal.countDown();
              }

              @Override
              public String toString() {
                return "callGetOnFuture(" + future.getKey() + "," + future.getValue() + ")";
              }
            },
            exec);
  }
  try {
    if (maxTime != null) doneSignal.await(maxTime, TimeUnit.MILLISECONDS);
    else doneSignal.await();
    if (errors.get() > 0) {
      String message = message(logPrefix, total, complete.get(), errors.get(), start);
      RuntimeException exception = new RuntimeException(message);
      logger.error(exception, message);
    }
    if (logger.isTraceEnabled()) {
      String message = message(logPrefix, total, complete.get(), errors.get(), start);
      logger.trace(message);
    }
  } catch (InterruptedException e) {
    // fix: restore the interrupt flag so callers can observe the interruption
    Thread.currentThread().interrupt();
    String message = message(logPrefix, total, complete.get(), errors.get(), start);
    TimeoutException exception = new TimeoutException(message);
    logger.error(exception, message);
    Throwables.propagate(exception);
  }
  return errorMap;
}
/** Returns true once the (refreshed) load balancer rule reports the ACTIVE state. */
public boolean apply(LoadBalancerRule rule) {
  logger.trace("looking for state on rule %s", checkNotNull(rule, "rule"));
  LoadBalancerRule refreshed = refresh(rule);
  if (refreshed == null) {
    // rule disappeared (or cannot be fetched yet): not active
    return false;
  }
  logger.trace(
      "%s: looking for rule state %s: currently: %s",
      refreshed.getId(), State.ACTIVE, refreshed.getState());
  return refreshed.getState() == State.ACTIVE;
}
/** Logs the request line and every non-null-keyed header at debug level, each line prefixed. */
public static void logRequest(Logger logger, HttpRequest request, String prefix) {
  if (!logger.isDebugEnabled()) {
    return;
  }
  logger.debug("%s %s", prefix, request.getRequestLine().toString());
  for (Entry<String, String> header : request.getHeaders().entries()) {
    if (header.getKey() != null) {
      logger.debug("%s %s: %s", prefix, header.getKey(), header.getValue());
    }
  }
}
@VisibleForTesting Password getBestPassword(Set<Password> passwords, VirtualGuest context) { if (passwords == null || passwords.isEmpty()) { throw new IllegalStateException("No credentials declared for " + context); } if (passwords.size() == 1) { // usual path return Iterables.getOnlyElement(passwords); } // in some setups a there may be multiple passwords; pick the best Password bestPassword = null; Set<Password> alternates = Sets.newLinkedHashSet(); int bestScore = -1; for (Password p : passwords) { int score = -1; if ("root".equals(p.getUsername())) score = 10; else if ("root".equalsIgnoreCase(p.getUsername())) score = 4; else if ("ubuntu".equals(p.getUsername())) score = 8; else if ("ubuntu".equalsIgnoreCase(p.getUsername())) score = 3; else if ("administrator".equals(p.getUsername())) score = 5; else if ("administrator".equalsIgnoreCase(p.getUsername())) score = 2; else if (p.getUsername() != null && p.getUsername().length() > 1) score = 1; if (score > 0) { if (score > bestScore) { bestPassword = p; alternates.clear(); bestScore = score; } else if (score == bestScore) { alternates.add(p); } } } if (bestPassword == null) { throw new IllegalStateException( "No valid credentials available for " + context + "; found: " + passwords); } if (!alternates.isEmpty()) { logger.warn( "Multiple credentials for " + bestPassword.getUsername() + "@" + context + "; using first declared " + bestPassword + " and ignoring " + alternates); } else { logger.debug( "Multiple credentials for " + context + "; using preferred username " + bestPassword.getUsername()); } return bestPassword; }
/**
 * Runs the given lifecycle action on the node; only "start", "stop" and "run" are executed
 * as root (when runAsRoot is set). "status" results are logged at trace level so repeated
 * polling does not spam the debug log.
 */
public ExecResponse runAction(String action) {
  // fix idiom: Predicates.in(set).apply(x) is just set.contains(x)
  String command =
      runAsRoot && ImmutableSet.of("start", "stop", "run").contains(action)
          ? execScriptAsRoot(action)
          : execScriptAsDefaultUser(action);
  ExecResponse returnVal = runCommand(command);
  if ("status".equals(action)) logger.trace("<< %s(%d)", action, returnVal.getExitCode());
  else if (computeLogger.isTraceEnabled()) computeLogger.trace("<< %s[%s]", action, returnVal);
  else computeLogger.debug("<< %s(%d)", action, returnVal.getExitCode());
  return returnVal;
}
public void execute(OrgAndName orgTag) { for (KeyPair keyPair : terremarkClient.listKeyPairsInOrg(orgTag.getOrg())) { if (keyPair .getName() .matches("jclouds_" + orgTag.getName().replaceAll("-", "_") + "_[0-9a-f]+")) { logger.debug(">> deleting keyPair(%s)", keyPair.getName()); terremarkClient.deleteKeyPair(keyPair.getId()); // TODO: test this clear happens credentialsMap.remove(orgTag); logger.debug("<< deleted keyPair(%s)", keyPair.getName()); } } }
/**
 * Returns true once the task reports SUCCESS; throws when the task reports an error.
 * A missing task (refresh returned null) counts as not yet successful.
 */
public boolean apply(String taskId) {
  logger.trace("looking for status on task %s", checkNotNull(taskId, "taskId"));
  Task task = refresh(taskId);
  if (task == null) {
    return false;
  }
  Task.Status status = task.getStatus();
  logger.trace(
      "%s: looking for task status %s: currently: %s", task.getId(), Task.Status.SUCCESS, status);
  if (task.getError() != null) {
    throw new IllegalStateException(
        String.format(
            "task %s failed with exception %s", task.getId(), task.getError().toString()));
  }
  return status == Task.Status.SUCCESS;
}
/**
 * Extracts the jclouds group name encoded in exactly one of the instance's data strings,
 * or null when no string (or more than one) matches the naming convention.
 */
private String parseGroupFrom(final RunningInstance instance, final Set<String> data) {
  String group = null;
  // fix: build the convention once and reuse it for both matching and extraction,
  // instead of calling namingConvention.create() twice
  GroupNamingConvention convention = namingConvention.create();
  try {
    String encodedGroup =
        Iterables.getOnlyElement(Iterables.filter(data, convention.containsAnyGroup()));
    group = convention.extractGroup(encodedGroup);
  } catch (NoSuchElementException e) {
    // no data string matched the convention
    logger.debug("no group parsed from %s's data: %s", instance.getId(), data);
  } catch (IllegalArgumentException e) {
    // more than one match is ambiguous: report and return null
    logger.debug(
        "too many groups match naming convention; %s's data: %s", instance.getId(), data);
  }
  return group;
}
/**
 * Executes the command over ssh, first logging it with the node's password (if any) masked.
 * "…status" commands are logged at trace level to keep polling quiet.
 */
ExecResponse runCommand(String command) {
  // mask the node's password so it never appears in the logs
  String password = node.getCredentials().getPassword();
  // fix: guard against an empty password — String.replace("", "XXXXX") would insert
  // the mask between every character of the command
  String sanitized =
      (password != null && !password.isEmpty()) ? command.replace(password, "XXXXX") : command;
  String statement =
      String.format(
          ">> running [%s] as %s@%s", sanitized, ssh.getUsername(), ssh.getHostAddress());
  if (command.endsWith("status")) logger.trace(statement);
  else computeLogger.debug(statement);
  return ssh.exec(command);
}
/** * Creates a directory and returns the result * * @param container * @param directory * @return true if the directory was created, otherwise false */ protected boolean createDirectoryWithResult(String container, String directory) { String directoryFullName = buildPathStartingFromBaseDir(container, directory); logger.debug("Creating directory %s", directoryFullName); // cannot use directoryFullName, because the following method rebuild // another time the path starting from base directory if (buildPathAndChecksIfDirectoryExists(container, directory)) { logger.debug("Directory %s already exists", directoryFullName); return false; } File directoryToCreate = new File(directoryFullName); boolean result = directoryToCreate.mkdirs(); return result; }
/** Translates every rimuhosting image into a jclouds {@link Image}, all marked AVAILABLE. */
@Override
public Set<? extends Image> get() {
  logger.debug(">> providing images");
  final Set<Image> images = Sets.newHashSet();
  for (org.jclouds.rimuhosting.miro.domain.Image from : sync.getImageList()) {
    ImageBuilder builder = new ImageBuilder();
    builder
        .ids(from.getId() + "")
        .name(from.getDescription())
        .description(from.getDescription())
        .operatingSystem(parseOs(from))
        .status(Status.AVAILABLE);
    images.add(builder.build());
  }
  logger.debug("<< images(%d)", images.size());
  return images;
}
@Override public Map<?, ListenableFuture<Void>> execute( String group, int count, Template template, Set<NodeMetadata> goodNodes, Map<NodeMetadata, Exception> badNodes, Multimap<NodeMetadata, CustomizationResponse> customizationResponses) { Template mutableTemplate = template.clone(); Set<RunningInstance> started = runInstancesAndWarnOnInvisible(group, count, mutableTemplate); if (started.size() == 0) { logger.warn("<< unable to start instances(%s)", mutableTemplate); return ImmutableMap.of(); } populateCredentials(started, template.getOptions()); if (autoAllocateElasticIps) // before customization as the elastic ips may be needed blockUntilRunningAndAssignElasticIpsToInstancesOrPutIntoBadMap(started, badNodes); return utils.customizeNodesAndAddToGoodMapOrPutExceptionIntoBadMap( mutableTemplate.getOptions(), transform(started, runningInstanceToNodeMetadata), goodNodes, badNodes, customizationResponses); }
/**
 * Looks up a server by its slash-encoded datacenter/server id, returning it wrapped with its
 * datacenter, or null when not found.
 */
@Override
public ServerInDataCenter getNode(String id) {
  DataCenterAndId datacenterAndId = DataCenterAndId.fromSlashEncoded(id);
  // fix: ">>" marks the start of an operation and "<<" its result per the logging
  // convention used throughout this codebase; the two arrows were inverted here
  logger.trace(">> searching for server with id=%s", id);
  Server server =
      api.serverApi()
          .getServer(
              datacenterAndId.getDataCenter(),
              datacenterAndId.getId(),
              new DepthOptions().depth(3));
  if (server == null) {
    return null;
  }
  logger.trace("<< found server [%s]", server.properties().name());
  return new ServerInDataCenter(server, datacenterAndId.getDataCenter());
}
/**
 * Fetches the named nodes concurrently via the chef api, blocking until all lookups finish.
 *
 * @param executor executor the per-node lookups are submitted to
 * @param environmentName environment the nodes belong to (used for logging only here)
 * @param toGet names of the nodes to fetch
 */
@Override
public Iterable<? extends Node> execute(
    final ListeningExecutorService executor, String environmentName, Iterable<String> toGet) {
  ListenableFuture<List<Node>> futures =
      allAsList(
          transform(
              toGet,
              new Function<String, ListenableFuture<Node>>() {
                @Override
                public ListenableFuture<Node> apply(final String input) {
                  return executor.submit(
                      new Callable<Node>() {
                        @Override
                        public Node call() throws Exception {
                          return api.getNode(input);
                        }
                      });
                }
              }));
  // fix: pass format args to the logger (as done elsewhere in the codebase) instead of
  // eagerly String.format-ing a message that may never be emitted at trace level
  logger.trace(
      "getting nodes in environment %s: %s", environmentName, Joiner.on(',').join(toGet));
  return getUnchecked(futures);
}
/**
 * Fetches, in parallel, the extended virtual machine lists of all given virtual appliances.
 *
 * @param executor executor the per-appliance API calls are submitted to
 * @param vapps appliances whose virtual machines should be listed
 * @param options options forwarded to each listVirtualMachines call
 * @return the joined virtual machines of every appliance
 */
private Iterable<VirtualMachineWithNodeExtendedDto> listConcurrentVirtualMachines(
    final ListeningExecutorService executor,
    final Iterable<VirtualAppliance> vapps,
    final VirtualMachineOptions options) {
  ListenableFuture<List<VirtualMachinesWithNodeExtendedDto>> futures =
      allAsList(
          transform(
              vapps,
              new Function<
                  VirtualAppliance, ListenableFuture<VirtualMachinesWithNodeExtendedDto>>() {
                @Override
                public ListenableFuture<VirtualMachinesWithNodeExtendedDto> apply(
                    final VirtualAppliance input) {
                  return executor.submit(
                      new Callable<VirtualMachinesWithNodeExtendedDto>() {
                        @Override
                        public VirtualMachinesWithNodeExtendedDto call() throws Exception {
                          return context
                              .getApi()
                              .getCloudApi()
                              .listVirtualMachines(input.unwrap(), options);
                        }
                      });
                }
              }));
  logger.trace("getting virtual machines");
  // block until every future finishes, then flatten the per-appliance results
  return DomainWrapper.join(getUnchecked(futures));
}
/**
 * Terremark does not provide vApp templates in the vDC resourceEntity list. Rather, you must
 * query the catalog.
 */
@Override
public Set<? extends Image> get() {
  logger.debug(">> providing vAppTemplates");
  // resolve the orgs for all known locations, then flatten each org's images into one set
  return newLinkedHashSet(
      Iterables.concat(
          Iterables.transform(organizatonsForLocations.apply(locations.get()), imagesInOrg)));
}
/**
 * Lists every server across all datacenters, each wrapped with its datacenter id.
 */
@Override
public Iterable<ServerInDataCenter> listNodes() {
  // fix: the same "<< fetching servers.." message was logged at both start and end;
  // ">>" marks the start per the convention used elsewhere in this codebase
  logger.trace(">> fetching servers..");
  // NOTE(review): "datacetners" is a (misspelled) field declared elsewhere in this class;
  // renaming it is out of scope for this method
  datacetners = api.dataCenterApi().list();
  List<ServerInDataCenter> servers = new ArrayList<ServerInDataCenter>();
  for (DataCenter dataCenter : datacetners) {
    // depth(4) requests an expanded server representation — TODO confirm required depth
    List<Server> serversInDataCenter =
        api.serverApi().getList(dataCenter.id(), new DepthOptions().depth(4));
    for (Server server : serversInDataCenter) {
      servers.add(new ServerInDataCenter(server, dataCenter.id()));
    }
  }
  logger.trace("<< fetching servers..");
  return servers;
}
/**
 * Maps the virtual machine's current state to a monitor decision: ON is DONE,
 * NOT_ALLOCATED/UNKNOWN are FAILED, everything else keeps monitoring. Any exception while
 * reading the state is logged and treated as CONTINUE.
 */
@Override
public MonitorStatus apply(final VirtualMachine virtualMachine) {
  checkNotNull(virtualMachine, "virtualMachine");
  try {
    VirtualMachineState state = virtualMachine.getState();
    if (state == VirtualMachineState.ON) {
      return MonitorStatus.DONE;
    }
    if (state == VirtualMachineState.NOT_ALLOCATED || state == VirtualMachineState.UNKNOWN) {
      return MonitorStatus.FAILED;
    }
    return MonitorStatus.CONTINUE;
  } catch (Exception ex) {
    logger.warn(
        ex,
        "exception thrown while monitoring %s on %s, returning CONTINUE",
        virtualMachine,
        getClass().getName());
    return MonitorStatus.CONTINUE;
  }
}
// Integration test: launches a 3-node cluster, verifies each node's group and ssh
// reachability, then destroys every node belonging to the cluster.
@Test
public void testLaunchCluster() throws RunNodesException {
  int numNodes = 3;
  final String clusterName = "test-launch-cluster";
  // start the cluster with a known login user and the standard admin-access script
  Set<? extends NodeMetadata> nodes =
      context
          .getComputeService()
          .createNodesInGroup(
              clusterName,
              numNodes,
              TemplateOptions.Builder.overrideLoginUser("toor")
                  .runScript(AdminAccess.standard()));
  assertEquals(numNodes, nodes.size(), "wrong number of nodes");
  for (NodeMetadata node : nodes) {
    assertEquals("test-launch-cluster", node.getGroup());
    logger.debug("Created Node: %s", node);
    // verify the node is reachable over ssh and can run a trivial command
    SshClient client = context.utils().sshForNode().apply(node);
    client.connect();
    ExecResponse hello = client.exec("echo hello");
    assertEquals(hello.getOutput().trim(), "hello");
  }
  // clean up: destroy every node whose id references this cluster
  context
      .getComputeService()
      .destroyNodesMatching(
          new Predicate<NodeMetadata>() {
            @Override
            public boolean apply(NodeMetadata input) {
              return input.getId().contains(clusterName);
            }
          });
}