@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public void execute() throws IOException, RecommenderBuildException {
    LenskitRecommenderEngine engine = loadEngine();

    long user = options.getLong("user");
    List<Long> items = options.get("items");

    LenskitRecommender rec = engine.createRecommender();
    RatingPredictor pred = rec.getRatingPredictor();
    if (pred == null) {
        logger.error("recommender has no rating predictor");
        throw new UnsupportedOperationException("no rating predictor");
    }

    logger.info("predicting {} items", items.size());
    Symbol pchan = getPrintChannel();
    Stopwatch timer = Stopwatch.createStarted();
    SparseVector preds = pred.predict(user, items);

    // If a print channel was requested, look up the side channel whose raw symbol matches it.
    Long2ObjectMap channel = null;
    if (pchan != null) {
        for (TypedSymbol sym : preds.getChannelSymbols()) {
            if (sym.getRawSymbol().equals(pchan)) {
                channel = preds.getChannel(sym);
            }
        }
    }

    // Print each prediction, appending the channel value when one was found.
    for (VectorEntry e : preds) {
        System.out.format(" %d: %.3f", e.getKey(), e.getValue());
        if (channel != null) {
            System.out.format(" (%s)", channel.get(e.getKey()));
        }
        System.out.println();
    }

    timer.stop();
    logger.info("predicted for {} items in {}", items.size(), timer);
}
@Override
protected int runWithJobId(final Namespace options, final HeliosClient client,
                           final PrintStream out, final boolean json, final JobId jobId,
                           final BufferedReader stdin)
    throws ExecutionException, InterruptedException, IOException {
  final String name = options.getString(nameArg.getDest());
  final long timeout = options.getLong(timeoutArg.getDest());
  final int parallelism = options.getInt(parallelismArg.getDest());
  final boolean async = options.getBoolean(asyncArg.getDest());
  final long rolloutTimeout = options.getLong(rolloutTimeoutArg.getDest());
  final boolean migrate = options.getBoolean(migrateArg.getDest());
  final boolean overlap = options.getBoolean(overlapArg.getDest());
  final String token = options.getString(tokenArg.getDest());

  checkArgument(timeout > 0, "Timeout must be greater than 0");
  checkArgument(parallelism > 0, "Parallelism must be greater than 0");
  checkArgument(rolloutTimeout > 0, "Rollout timeout must be greater than 0");

  final long startTime = timeSupplier.get();

  final RolloutOptions rolloutOptions = RolloutOptions.newBuilder()
      .setTimeout(timeout)
      .setParallelism(parallelism)
      .setMigrate(migrate)
      .setOverlap(overlap)
      .setToken(token)
      .build();

  final RollingUpdateResponse response =
      client.rollingUpdate(name, jobId, rolloutOptions).get();

  if (response.getStatus() != RollingUpdateResponse.Status.OK) {
    if (!json) {
      out.println("Failed: " + response);
    } else {
      out.println(response.toJsonString());
    }
    return 1;
  }

  if (!json) {
    out.println(format("Rolling update%s started: %s -> %s "
                       + "(parallelism=%d, timeout=%d, overlap=%b, token=%s)%s",
                       async ? " (async)" : "",
                       name,
                       jobId.toShortString(),
                       parallelism,
                       timeout,
                       overlap,
                       token,
                       async ? "" : "\n"));
  }

  final Map<String, Object> jsonOutput = Maps.newHashMap();
  jsonOutput.put("parallelism", parallelism);
  jsonOutput.put("timeout", timeout);
  jsonOutput.put("overlap", overlap);
  jsonOutput.put("token", token);

  if (async) {
    // In async mode, report that the rolling update was started and return immediately.
    if (json) {
      jsonOutput.put("status", response.getStatus());
      out.println(Json.asStringUnchecked(jsonOutput));
    }
    return 0;
  }

  String error = "";
  boolean failed = false;
  boolean timedOut = false;
  final Set<String> reported = Sets.newHashSet();

  // Poll the deployment-group status until the rollout finishes, fails, or times out.
  while (true) {
    final DeploymentGroupStatusResponse status = client.deploymentGroupStatus(name).get();

    if (status == null) {
      failed = true;
      error = "Failed to fetch deployment-group status";
      break;
    }

    if (!jobId.equals(status.getDeploymentGroup().getJobId())) {
      // Another rolling-update was started, overriding this one -- exit
      failed = true;
      error = "Deployment-group job id changed during rolling-update";
      break;
    }

    if (!json) {
      // Report each host once, as soon as it is running the new job.
      for (DeploymentGroupStatusResponse.HostStatus hostStatus : status.getHostStatuses()) {
        final JobId hostJobId = hostStatus.getJobId();
        final String host = hostStatus.getHost();
        final TaskStatus.State state = hostStatus.getState();
        final boolean done = hostJobId != null
                             && hostJobId.equals(jobId)
                             && state == TaskStatus.State.RUNNING;

        if (done && reported.add(host)) {
          out.println(format("%s -> %s (%d/%d)", host, state,
                             reported.size(), status.getHostStatuses().size()));
        }
      }
    }

    if (status.getStatus() != DeploymentGroupStatusResponse.Status.ROLLING_OUT) {
      if (status.getStatus() == DeploymentGroupStatusResponse.Status.FAILED) {
        failed = true;
        error = status.getError();
      }
      break;
    }

    if (timeSupplier.get() - startTime > TimeUnit.MINUTES.toMillis(rolloutTimeout)) {
      // Rollout timed out
      timedOut = true;
      break;
    }

    sleepFunction.sleep(POLL_INTERVAL_MILLIS);
  }

  final double duration = (timeSupplier.get() - startTime) / 1000.0;

  if (json) {
    if (failed) {
      jsonOutput.put("status", "FAILED");
      jsonOutput.put("error", error);
    } else if (timedOut) {
      jsonOutput.put("status", "TIMEOUT");
    } else {
      jsonOutput.put("status", "DONE");
    }
    jsonOutput.put("duration", duration);
    out.println(Json.asStringUnchecked(jsonOutput));
  } else {
    out.println();
    if (failed) {
      out.println(format("Failed: %s", error));
    } else if (timedOut) {
      out.println("Timed out! (rolling-update still in progress)");
    } else {
      out.println("Done.");
    }
    out.println(format("Duration: %.2f s", duration));
  }

  return (failed || timedOut) ? 1 : 0;
}