private Map<String, SpringResource> generateResourceMap(Set<Class<?>> validClasses)
    throws GenerateException {
  Map<String, SpringResource> resourceMap = new HashMap<String, SpringResource>();
  for (Class<?> c : validClasses) {
    RequestMapping requestMapping = c.getAnnotation(RequestMapping.class);
    String description = "";
    // This try/catch block is to stop a bamboo build from failing due to NoClassDefFoundError.
    // This occurs when a class or method loaded by reflections contains a type that has no
    // dependency.
    try {
      resourceMap = analyzeController(c, resourceMap, description);
      List<Method> mList = new ArrayList<Method>(Arrays.asList(c.getMethods()));
      if (c.getSuperclass() != null) {
        mList.addAll(Arrays.asList(c.getSuperclass().getMethods()));
      }
    } catch (NoClassDefFoundError e) {
      // occurs when a method type or annotation is not recognized by the plugin
      LOG.error(e.getMessage());
      LOG.info(c.getName());
    } catch (ClassNotFoundException e) {
      LOG.error(e.getMessage());
      LOG.info(c.getName());
    }
  }
  return resourceMap;
}
@Override
public void run() {
  if (isSynchronizedWithZooKeeper.get() || !zkClient.isConnected() || !started.get()) {
    return;
  }
  if (checkVersion.getAndSet(false)) {
    try {
      synchronized (lastStatusVersionMonitor) {
        final Stat stat = zkClient.getZookeeper().exists(path, null);
        if (stat != null
            && zkClient.getZookeeper().getSessionId() == stat.getEphemeralOwner()) {
          zkClient.getZookeeper().delete(path, lastStatusVersion);
        }
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted");
      checkVersion.set(true);
    } catch (KeeperException e) {
      LOG.info("exception " + e.getMessage());
      checkVersion.set(true);
    }
  }
  LOG.info(
      "We are out-of-sync, have a zookeeper connection, and are started, trying reclaim: "
          + path
          + " "
          + this);
  tryClaim();
}
@Override
public void attributeAdded(HttpSessionBindingEvent _event) {
  System.out.println(
      "edu.temple.cis3238.wiki.WikiEventMonitor.[SESSION]attributeAdded()\n added "
          + _event.getName()
          + _event.toString());
  // Objects.toString() already handles null, so the extra toString() call that could
  // throw an NPE on a null attribute value is dropped.
  LOG.info(Objects.toString(_event.getValue()));
  LOG.info(Objects.toString(_event.getSource()));
}
@Override
@NotNull
public NewColorAndFontPanel createPanel(@NotNull ColorAndFontOptions options) {
  final DiffOptionsPanel optionsPanel = new DiffOptionsPanel(options);
  SchemesPanel schemesPanel = new SchemesPanel(options);
  PreviewPanel previewPanel;
  try {
    final DiffPreviewPanel diffPreviewPanel = new DiffPreviewPanel(myDisposable);
    diffPreviewPanel.setMergeRequest(null);
    schemesPanel.addListener(
        new ColorAndFontSettingsListener.Abstract() {
          @Override
          public void schemeChanged(final Object source) {
            diffPreviewPanel.setColorScheme(getSelectedScheme());
            optionsPanel.updateOptionsList();
            diffPreviewPanel.updateView();
          }
        });
    previewPanel = diffPreviewPanel;
  } catch (FilesTooBigForDiffException e) {
    LOG.info(e);
    previewPanel = new PreviewPanel.Empty();
  }
  return new NewColorAndFontPanel(
      schemesPanel, optionsPanel, previewPanel, DIFF_GROUP, null, null);
}
@Override
public ReturnState dropTable(RpcController controller, TableIdentifierProto request)
    throws ServiceException {
  String dbName = request.getDatabaseName();
  String tbName = request.getTableName();

  // databases served by a linked metadata manager cannot be modified through the catalog
  if (linkedMetadataManager.existsDatabase(dbName)) {
    return errInsufficientPrivilege("drop a table in database '" + dbName + "'");
  }

  if (metaDictionary.isSystemDatabase(dbName)) {
    return errInsufficientPrivilege("drop a table in database '" + dbName + "'");
  }

  wlock.lock();
  try {
    store.dropTable(dbName, tbName);
    LOG.info(
        String.format(
            "relation \"%s\" is deleted from the catalog (%s)",
            CatalogUtil.getCanonicalTableName(dbName, tbName), bindAddressStr));
    return OK;
  } catch (Throwable t) {
    printStackTraceIfError(LOG, t);
    return returnError(t);
  } finally {
    wlock.unlock();
  }
}
@Override
public ReturnState createDatabase(RpcController controller, CreateDatabaseRequest request) {
  String databaseName = request.getDatabaseName();
  String tablespaceName = request.getTablespaceName();

  if (linkedMetadataManager.existsDatabase(databaseName)) {
    return errDuplicateDatabase(databaseName);
  }

  // check virtual databases manually because the catalog does not actually contain them
  if (metaDictionary.isSystemDatabase(databaseName)) {
    return errDuplicateDatabase(databaseName);
  }

  wlock.lock();
  try {
    store.createDatabase(databaseName, tablespaceName);
    LOG.info(String.format("database \"%s\" is created", databaseName));
    return OK;
  } catch (Throwable t) {
    printStackTraceIfError(LOG, t);
    return returnError(t);
  } finally {
    wlock.unlock();
  }
}
public Pair<List<RepositoryLocationGroup>, List<RepositoryLocation>> groupLocations(
    final List<RepositoryLocation> in) {
  final List<RepositoryLocationGroup> groups = new ArrayList<RepositoryLocationGroup>();
  final List<RepositoryLocation> singles = new ArrayList<RepositoryLocation>();

  final MultiMap<SVNURL, RepositoryLocation> map = new MultiMap<SVNURL, RepositoryLocation>();

  for (RepositoryLocation location : in) {
    final SvnRepositoryLocation svnLocation = (SvnRepositoryLocation) location;
    final String url = svnLocation.getURL();

    final SVNURL root = SvnUtil.getRepositoryRoot(myVcs, url);
    if (root == null) {
      // should not occur
      LOG.info("repository root not found for location: " + location.toPresentableString());
      singles.add(location);
    } else {
      map.putValue(root, svnLocation);
    }
  }

  final Set<SVNURL> keys = map.keySet();
  for (SVNURL key : keys) {
    final Collection<RepositoryLocation> repositoryLocations = map.get(key);
    if (repositoryLocations.size() == 1) {
      singles.add(repositoryLocations.iterator().next());
    } else {
      final SvnRepositoryLocationGroup group =
          new SvnRepositoryLocationGroup(key, repositoryLocations);
      groups.add(group);
    }
  }
  return new Pair<List<RepositoryLocationGroup>, List<RepositoryLocation>>(groups, singles);
}
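/*
 * A minimal, self-contained sketch of the grouping pattern used in groupLocations()
 * above: bucket items under a derived key, then emit lone items as singles and the
 * rest as groups. Plain JDK collections stand in for the MultiMap helper; every name
 * here is illustrative and not part of the surrounding codebase.
 */
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

final class Grouping {
  /** Returns (groups, singles); items whose key is null are treated as singles. */
  static <K, V> Map.Entry<List<List<V>>, List<V>> groupByKey(List<V> items, Function<V, K> keyOf) {
    Map<K, List<V>> buckets = new HashMap<>();
    List<V> singles = new ArrayList<>();
    for (V item : items) {
      K key = keyOf.apply(item); // analogous to SvnUtil.getRepositoryRoot(...)
      if (key == null) {
        singles.add(item); // no key could be derived; keep it as a singleton
      } else {
        buckets.computeIfAbsent(key, k -> new ArrayList<>()).add(item);
      }
    }
    List<List<V>> groups = new ArrayList<>();
    for (List<V> bucket : buckets.values()) {
      if (bucket.size() == 1) {
        singles.add(bucket.get(0)); // a one-element bucket is really a single
      } else {
        groups.add(bucket);
      }
    }
    return Map.entry(groups, singles);
  }
}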
@Override
public ReturnState createTable(RpcController controller, TableDescProto request) {
  String[] splitted = CatalogUtil.splitFQTableName(request.getTableName());
  String dbName = splitted[0];
  String tbName = splitted[1];

  if (linkedMetadataManager.existsDatabase(dbName)) {
    return errInsufficientPrivilege("create a table in database '" + dbName + "'");
  }

  if (metaDictionary.isSystemDatabase(dbName)) {
    return errInsufficientPrivilege("create a table in database '" + dbName + "'");
  }

  wlock.lock();
  try {
    store.createTable(request);
    LOG.info(
        String.format(
            "relation \"%s\" is added to the catalog (%s)",
            CatalogUtil.getCanonicalTableName(dbName, tbName), bindAddressStr));
    return OK;
  } catch (Throwable t) {
    printStackTraceIfError(LOG, t);
    return returnError(t);
  } finally {
    wlock.unlock();
  }
}
@Override
public void attributeAdded(ServletContextAttributeEvent _event) {
  System.out.println(
      "edu.temple.cis3238.wiki.WikiEventMonitor.attributeAdded()\n added "
          + _event.getName()
          + _event.toString());
  LOG.info(Objects.toString(_event.getSource()));
}
@Override
public void processPacket(Packet packet) {
  final Presence presence = ((Presence) packet);
  if (presence.getType() != Presence.Type.subscribe) return;

  LOG.info("Subscribe request from " + presence.getFrom());

  if (myIgnoreList.isIgnored(presence.getFrom())) {
    LOG.info(presence.getFrom() + " in ignore list");
    return;
  }

  if (isUserInMyContactListAndActive(presence.getFrom()) || Pico.isUnitTest()) {
    acceptSubscription(presence, true);
    return;
  }

  UIUtil.invokeLater(
      () -> acceptSubscription(presence, myUI.shouldAcceptSubscriptionRequest(presence)));
}
public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
  final BuildMessage.Kind kind;
  switch (diagnostic.getKind()) {
    case ERROR:
      kind = BuildMessage.Kind.ERROR;
      myErrorCount++;
      break;
    case MANDATORY_WARNING:
    case WARNING:
    case NOTE:
      kind = BuildMessage.Kind.WARNING;
      myWarningCount++;
      break;
    default:
      kind = BuildMessage.Kind.INFO;
  }

  File sourceFile = null;
  try {
    // for the Eclipse compiler, a mere attempt to call getSource() may lead to an NPE,
    // so the method is called under try/catch to avoid induced compiler errors
    final JavaFileObject source = diagnostic.getSource();
    sourceFile = source != null ? Utils.convertToFile(source.toUri()) : null;
  } catch (Exception e) {
    LOG.info(e);
  }

  final String srcPath =
      sourceFile != null ? FileUtil.toSystemIndependentName(sourceFile.getPath()) : null;
  String message = diagnostic.getMessage(Locale.US);
  if (Utils.IS_TEST_MODE) {
    LOG.info(message);
  }
  myContext.processMessage(
      new CompilerMessage(
          BUILDER_NAME,
          kind,
          message,
          srcPath,
          diagnostic.getStartPosition(),
          diagnostic.getEndPosition(),
          diagnostic.getPosition(),
          diagnostic.getLineNumber(),
          diagnostic.getColumnNumber()));
}
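/*
 * A minimal sketch of the same javax.tools severity mapping in isolation: diagnostics
 * are gathered with the standard DiagnosticCollector, and the compiler's Kind values
 * are folded into a coarser severity exactly as the switch in report() above does
 * (NOTE and MANDATORY_WARNING both count as warnings). The Severity enum and class
 * name are illustrative.
 */
import java.util.Locale;
import javax.tools.Diagnostic;
import javax.tools.DiagnosticCollector;
import javax.tools.JavaFileObject;

final class DiagnosticSeverity {
  enum Severity { ERROR, WARNING, INFO }

  static Severity of(Diagnostic<?> d) {
    switch (d.getKind()) {
      case ERROR:
        return Severity.ERROR;
      case MANDATORY_WARNING:
      case WARNING:
      case NOTE:
        return Severity.WARNING;
      default: // Kind.OTHER, plus anything a future JDK may add
        return Severity.INFO;
    }
  }

  static void print(DiagnosticCollector<JavaFileObject> collector) {
    for (Diagnostic<? extends JavaFileObject> d : collector.getDiagnostics()) {
      System.out.println(of(d) + " at line " + d.getLineNumber() + ": " + d.getMessage(Locale.US));
    }
  }
}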
@Override
@NotNull
public Collection<RefMethod> getSuperMethods() {
  if (mySuperMethods == null) return EMPTY_METHOD_LIST;
  if (mySuperMethods.size() > 10) {
    LOG.info("method: " + getName() + " owner:" + getOwnerClass().getQualifiedName());
  }
  if (getRefManager().isOfflineView()) {
    LOG.debug("Should not traverse graph offline");
  }
  return mySuperMethods;
}
/**
 * Increases the count of running tasks and disk loads for a certain task runner.
 *
 * @param volumeId volume identifier
 * @return the updated volume load (i.e., how many running tasks use this volume)
 */
private synchronized int increaseConcurrency(int volumeId) {
  int concurrency = 1;
  if (diskVolumeLoads.containsKey(volumeId)) {
    concurrency = diskVolumeLoads.get(volumeId) + 1;
  }

  if (volumeId > -1) {
    LOG.info(
        "Assigned host : " + host + ", Volume : " + volumeId + ", Concurrency : " + concurrency);
  } else if (volumeId == -1) {
    // the volume id is unknown: disabled namenode block metadata, a compressed text file,
    // or data on Amazon S3
    LOG.info(
        "Assigned host : "
            + host
            + ", Unknown Volume : "
            + volumeId
            + ", Concurrency : "
            + concurrency);
  } else if (volumeId == REMOTE) {
    // all blocks on this host have been processed, so the task will be assigned remotely
    LOG.info(
        "Assigned host : "
            + host
            + ", Remaining local tasks : "
            + getRemainingLocalTaskSize()
            + ", Remote Concurrency : "
            + concurrency);
  }
  diskVolumeLoads.put(volumeId, concurrency);
  return concurrency;
}
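/*
 * An illustrative counterpart to increaseConcurrency() above: the same per-volume load
 * map with the inverse operation sketched in. This is an assumption about how the
 * bookkeeping pairs up, not code from the scheduler; the class name, the REMOTE
 * sentinel value, and the map layout are stand-ins.
 */
import java.util.HashMap;
import java.util.Map;

final class VolumeLoadTracker {
  static final int REMOTE = -2; // assumed sentinel for tasks served remotely
  private final Map<Integer, Integer> diskVolumeLoads = new HashMap<>();

  synchronized int increase(int volumeId) {
    int concurrency = diskVolumeLoads.getOrDefault(volumeId, 0) + 1;
    diskVolumeLoads.put(volumeId, concurrency);
    return concurrency;
  }

  synchronized int decrease(int volumeId) {
    int concurrency = diskVolumeLoads.getOrDefault(volumeId, 0) - 1;
    if (concurrency <= 0) {
      diskVolumeLoads.remove(volumeId); // drop idle volumes so the map stays small
      return 0;
    }
    diskVolumeLoads.put(volumeId, concurrency);
    return concurrency;
  }
}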
private void acceptSubscription(final Presence presence, boolean subscribe) {
  if (!isOnline()) return;

  myFacade.changeSubscription(presence.getFrom(), subscribe);

  if (subscribe) {
    String from = getSimpleId(presence.getFrom());
    LOG.info("Add " + from + " to the roster");
    try {
      getRoster().createEntry(from, from, new String[] {UserModel.DEFAULT_GROUP});
    } catch (XMPPException e) {
      LOG.warn(e);
    }
  }
}
void applyFilter(@NotNull final List<T> requests, final ThreadReference thread) {
  for (T request : requests) {
    try {
      final boolean wasEnabled = request.isEnabled();
      if (wasEnabled) {
        request.disable();
      }
      addFilter(request, thread);
      if (wasEnabled) {
        request.enable();
      }
    } catch (InternalException e) {
      LOG.info(e);
    }
  }
}
@Override
public ReturnState createTablespace(RpcController controller, CreateTablespaceRequest request) {
  final String tablespaceName = request.getTablespaceName();
  final String uri = request.getTablespaceUri();

  wlock.lock();
  try {
    store.createTablespace(tablespaceName, uri);
    LOG.info(String.format("tablespace \"%s\" (%s) is created", tablespaceName, uri));
    return OK;
  } catch (Throwable t) {
    printStackTraceIfError(LOG, t);
    return returnError(t);
  } finally {
    wlock.unlock();
  }
}
public static DockerContainer create(
    CreateAgentRequest request, PluginSettings settings, DockerClient docker)
    throws InterruptedException, DockerException, IOException {
  String containerName = UUID.randomUUID().toString();

  HashMap<String, String> labels = labelsFrom(request);
  String imageName = image(request.properties());
  List<String> env = environmentFrom(request, settings, containerName);

  try {
    docker.inspectImage(imageName);
  } catch (ImageNotFoundException ex) {
    LOG.info("Image " + imageName + " not found, attempting to download.");
    docker.pull(imageName);
  }

  ContainerConfig.Builder containerConfigBuilder = ContainerConfig.builder();
  if (request.properties().containsKey("Command")) {
    containerConfigBuilder.cmd(
        splitIntoLinesAndTrimSpaces(request.properties().get("Command"))
            .toArray(new String[] {}));
  }

  ContainerConfig containerConfig =
      containerConfigBuilder.image(imageName).labels(labels).env(env).build();

  ContainerCreation container = docker.createContainer(containerConfig, containerName);
  String id = container.id();

  ContainerInfo containerInfo = docker.inspectContainer(id);

  LOG.debug("Created container " + containerName);
  docker.startContainer(containerName);

  return new DockerContainer(
      containerName, containerInfo.created(), request.properties(), request.environment());
}
@Override
public void processResult(int rawReturnCode, String notUsed, Object parent, String notUsed2) {
  KeeperException.Code returnCode = KeeperException.Code.get(rawReturnCode);
  ClaimedCoordinate claimedCoordinate = (ClaimedCoordinate) parent;
  LOG.fine(
      "Claim callback with "
          + returnCode.name()
          + " "
          + claimedCoordinate.path
          + " synced: "
          + isSynchronizedWithZooKeeper.get()
          + " thread: "
          + this);

  switch (returnCode) {
      // The claim was successful. This means that the node was created. We need to
      // populate the status and endpoints.
    case OK:
      // We should be the first one to write to the new node, or fail.
      // This requires that the first version is 0; we have not seen this documented,
      // but it should be a fair assumption and is verified by unit tests.
      synchronized (lastStatusVersionMonitor) {
        lastStatusVersion = 0;
      }

      // We need to set this to synced or updateCoordinateData will complain.
      // updateCoordinateData will set it to out-of-sync in case of problems.
      isSynchronizedWithZooKeeper.set(true);

      try {
        registerWatcher();
      } catch (CloudnameException e) {
        LOG.fine(
            "Failed to register watcher after claim. Going to state out of sync: "
                + e.getMessage());
        isSynchronizedWithZooKeeper.set(false);
        return;
      } catch (InterruptedException e) {
        LOG.fine("Interrupted while setting up new watcher. Going to state out of sync.");
        isSynchronizedWithZooKeeper.set(false);
        return;
      }
      // No exceptions, let's celebrate with a log message.
      LOG.info("Claim processed ok, path: " + path);
      claimedCoordinate.sendEventToCoordinateListener(
          CoordinateListener.Event.COORDINATE_OK, "claimed");
      return;

    case NODEEXISTS:
      // Someone has already claimed the coordinate. It might have been us in a
      // different thread. If we have already claimed the coordinate, we don't care.
      // Otherwise, notify the client. If everything is fine, this is not a true
      // negative, so ignore it. It might happen if two attempts to tryClaim the
      // coordinate run in parallel.
      if (isSynchronizedWithZooKeeper.get() && started.get()) {
        LOG.fine("Everything is fine, ignoring NODEEXISTS message, path: " + path);
        return;
      }

      LOG.info("Claim failed, node already exists, will retry: " + path);
      claimedCoordinate.sendEventToCoordinateListener(
          CoordinateListener.Event.NOT_OWNER, "Node already exists.");
      LOG.info("isSynchronizedWithZooKeeper: " + isSynchronizedWithZooKeeper.get());
      checkVersion.set(true);
      return;

    case NONODE:
      LOG.info("Could not claim due to missing coordinate, path: " + path);
      claimedCoordinate.sendEventToCoordinateListener(
          CoordinateListener.Event.NOT_OWNER,
          "No node on claiming coordinate: " + returnCode.name());
      return;

    default:
      // Some other problem; report it to the client.
      claimedCoordinate.sendEventToCoordinateListener(
          CoordinateListener.Event.NO_CONNECTION_TO_STORAGE,
          "Could not reclaim coordinate. Return code: " + returnCode.name());
      return;
  }
}
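/*
 * A minimal sketch of the asynchronous claim pattern handled above, using the stock
 * ZooKeeper client API: create an ephemeral node and branch on the translated return
 * code inside the callback. The class name and the empty case bodies (where retry and
 * listener notification would go) are illustrative.
 */
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

final class EphemeralClaim implements AsyncCallback.StringCallback {
  private final ZooKeeper zk;

  EphemeralClaim(ZooKeeper zk) {
    this.zk = zk;
  }

  void tryClaim(String path, byte[] data) {
    // asynchronous create; processResult() is invoked when the server answers
    zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, this, null);
  }

  @Override
  public void processResult(int rc, String path, Object ctx, String name) {
    switch (KeeperException.Code.get(rc)) {
      case OK:
        // the ephemeral node is ours; its first version is 0
        break;
      case NODEEXISTS:
        // already claimed, possibly by an earlier session of ours; schedule a retry
        break;
      case NONODE:
        // the parent path is missing, so the coordinate was never created
        break;
      default:
        // connection loss and friends; surface the error and try again later
        break;
    }
  }
}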
@Override
public ScriptContext call() throws Exception {
  try {
    Scanner scn = new Scanner(znodePath);
    scn.useDelimiter(":");
    String hostName = scn.next(); // host name
    String instance = scn.next(); // instance
    int infoPort = Integer.parseInt(scn.next()); // UI info port
    long serverStartTimestamp = Long.parseLong(scn.next());
    scn.close();

    // Get the --config property from the classpath... it is always first
    // in the classpath.
    String cp = System.getProperty("java.class.path");
    scn = new Scanner(cp);
    scn.useDelimiter(":");
    String confDir = scn.next();
    scn.close();
    LOG.debug("conf dir [" + confDir + "]");

    // Get -Dwms.home.dir
    String wmsHome = System.getProperty("wms.home.dir");

    // If stop-wms.sh is executed and WMS_MANAGES_ZK is set, then zookeeper is
    // stopped abruptly. The second scenario is when ZooKeeper fails for some
    // reason, regardless of whether WMS manages it. When either happens, the
    // WmsServer running znodes still exist in ZooKeeper and we see them at the
    // next startup. When they eventually time out, we get node-deleted events
    // for a server that no longer exists. So, only recognize WmsServer running
    // znodes that have timestamps after the last WmsMaster startup.
    if (serverStartTimestamp > startupTimestamp) {
      scriptContext.setHostName(hostName);
      scriptContext.setScriptName("sys_shell.py");
      if (hostName.equalsIgnoreCase(ia.getCanonicalHostName()))
        scriptContext.setCommand(
            "bin/wms-daemon.sh --config " + confDir + " start server " + instance);
      else
        scriptContext.setCommand(
            "pdsh -w "
                + hostName
                + " \"cd "
                + wmsHome
                + ";bin/wms-daemon.sh --config "
                + confDir
                + " start server "
                + instance
                + "\"");

      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        if (scriptContext.getStdOut().length() > 0)
          scriptContext.getStdOut().delete(0, scriptContext.getStdOut().length());
        if (scriptContext.getStdErr().length() > 0)
          scriptContext.getStdErr().delete(0, scriptContext.getStdErr().length());
        LOG.info(
            "Restarting WmsServer ["
                + hostName
                + ":"
                + instance
                + "], script [ "
                + scriptContext.toString()
                + " ]");
        ScriptManager.getInstance().runScript(scriptContext);

        if (scriptContext.getExitCode() == 0) {
          LOG.info("WmsServer [" + hostName + ":" + instance + "] restarted");
          break;
        } else {
          StringBuilder sb = new StringBuilder();
          sb.append("exit code [" + scriptContext.getExitCode() + "]");
          if (!scriptContext.getStdOut().toString().isEmpty())
            sb.append(", stdout [" + scriptContext.getStdOut().toString() + "]");
          if (!scriptContext.getStdErr().toString().isEmpty())
            sb.append(", stderr [" + scriptContext.getStdErr().toString() + "]");
          LOG.error(sb.toString());

          if (!retryCounter.shouldRetry()) {
            LOG.error(
                "WmsServer ["
                    + hostName
                    + ":"
                    + instance
                    + "] restart failed after "
                    + retryCounter.getMaxRetries()
                    + " retries");
            break;
          } else {
            retryCounter.sleepUntilNextRetry();
            retryCounter.useRetry();
          }
        }
      }
    } else {
      LOG.debug(
          "No restart for "
              + znodePath
              + "\nbecause WmsServer start time ["
              + DateFormat.getDateTimeInstance().format(new Date(serverStartTimestamp))
              + "] was before WmsMaster start time ["
              + DateFormat.getDateTimeInstance().format(new Date(startupTimestamp))
              + "]");
    }
  } catch (Exception e) {
    e.printStackTrace();
    LOG.error(e);
  }
  return scriptContext;
}
private void processPath(FileObject fileObject, Project project) throws CacheCorruptedException {
  File file = fileObject.getFile();
  final String path = file.getPath();
  try {
    if (CompilerManager.MAKE_ENABLED) {
      byte[] fileContent = fileObject.getContent(); // the file is assumed to exist!
      final JavaDependencyCache dependencyCache =
          myCompileContext.getDependencyCache().findChild(JavaDependencyCache.class);
      final int newClassQName = dependencyCache.reparseClassFile(file, fileContent);
      final Cache newClassesCache = dependencyCache.getNewClassesCache();
      final String sourceFileName = newClassesCache.getSourceFileName(newClassQName);
      final String qName = dependencyCache.resolve(newClassQName);
      String relativePathToSource =
          "/" + JavaMakeUtil.createRelativePathToSource(qName, sourceFileName);
      putName(sourceFileName, newClassQName, relativePathToSource, path);
      boolean haveToInstrument =
          myAddNotNullAssertions
              && hasNotNullAnnotations(
                  newClassesCache, dependencyCache.getSymbolTable(), newClassQName, project);

      if (haveToInstrument) {
        try {
          ClassReader reader = new ClassReader(fileContent, 0, fileContent.length);
          ClassWriter writer = new PsiClassWriter(myProject, myIsJdk16);

          if (NotNullVerifyingInstrumenter.processClassFile(reader, writer)) {
            fileObject = new FileObject(file, writer.toByteArray());
          }
        } catch (Exception ignored) {
          LOG.info(ignored);
        }
      }

      fileObject.save();
    } else {
      final String _path = FileUtil.toSystemIndependentName(path);
      final int dollarIndex = _path.indexOf('$');
      final int tailIndex = dollarIndex >= 0 ? dollarIndex : _path.length() - ".class".length();
      final int slashIndex = _path.lastIndexOf('/');
      final String sourceFileName = _path.substring(slashIndex + 1, tailIndex) + ".java";
      String relativePathToSource = _path.substring(myOutputDir.length(), tailIndex) + ".java";
      putName(
          sourceFileName,
          0 /*doesn't matter here*/,
          relativePathToSource.startsWith("/") ? relativePathToSource : "/" + relativePathToSource,
          path);
    }
  } catch (ClsFormatException e) {
    final String m = e.getMessage();
    String message =
        CompilerBundle.message(
            "error.bad.class.file.format", StringUtil.isEmpty(m) ? path : m + "\n" + path);
    myCompileContext.addMessage(CompilerMessageCategory.ERROR, message, null, -1, -1);
    LOG.info(e);
  } catch (IOException e) {
    myCompileContext.addMessage(CompilerMessageCategory.ERROR, e.getMessage(), null, -1, -1);
    LOG.info(e);
  } finally {
    myStatistics.incClassesCount();
    updateStatistics();
  }
}
private Operation parseMethod(Method method) {
  Operation operation = new Operation();

  RequestMapping requestMapping = method.getAnnotation(RequestMapping.class);
  Class<?> responseClass = null;
  List<String> produces = new ArrayList<String>();
  List<String> consumes = new ArrayList<String>();
  String responseContainer = null;
  String operationId = method.getName();
  Map<String, Property> defaultResponseHeaders = null;
  Set<Map<String, Object>> customExtensions = null;

  ApiOperation apiOperation = method.getAnnotation(ApiOperation.class);

  // a method without @ApiOperation (or one explicitly marked hidden) is skipped
  if (apiOperation == null || apiOperation.hidden()) return null;
  if (!"".equals(apiOperation.nickname())) operationId = apiOperation.nickname();

  defaultResponseHeaders = parseResponseHeaders(apiOperation.responseHeaders());

  operation.summary(apiOperation.value()).description(apiOperation.notes());

  customExtensions = parseCustomExtensions(apiOperation.extensions());
  if (customExtensions != null) {
    for (Map<String, Object> extension : customExtensions) {
      if (extension != null) {
        for (Map.Entry<String, Object> map : extension.entrySet()) {
          operation.setVendorExtension(
              map.getKey().startsWith("x-") ? map.getKey() : "x-" + map.getKey(),
              map.getValue());
        }
      }
    }
  }

  if (apiOperation.response() != null && !Void.class.equals(apiOperation.response()))
    responseClass = apiOperation.response();
  if (!"".equals(apiOperation.responseContainer()))
    responseContainer = apiOperation.responseContainer();

  // security
  if (apiOperation.authorizations() != null) {
    List<SecurityRequirement> securities = new ArrayList<SecurityRequirement>();
    for (Authorization auth : apiOperation.authorizations()) {
      if (auth.value() != null && !"".equals(auth.value())) {
        SecurityRequirement security = new SecurityRequirement();
        security.setName(auth.value());
        AuthorizationScope[] scopes = auth.scopes();
        for (AuthorizationScope scope : scopes) {
          if (scope.scope() != null && !"".equals(scope.scope())) {
            security.addScope(scope.scope());
          }
        }
        securities.add(security);
      }
    }
    if (securities.size() > 0) {
      for (SecurityRequirement sec : securities) operation.security(sec);
    }
  }

  if (responseClass == null) {
    // pick out the response type from the method declaration
    LOG.info("picking up response class from method " + method);
    Type t = method.getGenericReturnType();
    responseClass = method.getReturnType();
    if (responseClass.equals(ResponseEntity.class)) {
      responseClass = getGenericSubtype(method.getReturnType(), method.getGenericReturnType());
    }
    if (!responseClass.equals(Void.class)
        && !"void".equals(responseClass.toString())
        && responseClass.getAnnotation(Api.class) == null) {
      LOG.info("reading model " + responseClass);
      Map<String, Model> models = ModelConverters.getInstance().readAll(t);
    }
  }

  if (responseClass != null
      && !responseClass.equals(Void.class)
      && !responseClass.equals(ResponseEntity.class)
      && responseClass.getAnnotation(Api.class) == null) {
    if (isPrimitive(responseClass)) {
      Property responseProperty = null;
      Property property = ModelConverters.getInstance().readAsProperty(responseClass);
      if (property != null) {
        if ("list".equalsIgnoreCase(responseContainer))
          responseProperty = new ArrayProperty(property);
        else if ("map".equalsIgnoreCase(responseContainer))
          responseProperty = new MapProperty(property);
        else responseProperty = property;
        operation.response(
            200,
            new Response()
                .description("successful operation")
                .schema(responseProperty)
                .headers(defaultResponseHeaders));
      }
    } else if (!responseClass.equals(Void.class) && !"void".equals(responseClass.toString())) {
      Map<String, Model> models = ModelConverters.getInstance().read(responseClass);
      if (models.size() == 0) {
        Property pp = ModelConverters.getInstance().readAsProperty(responseClass);
        operation.response(
            200,
            new Response()
                .description("successful operation")
                .schema(pp)
                .headers(defaultResponseHeaders));
      }
      for (String key : models.keySet()) {
        Property responseProperty = null;

        if ("list".equalsIgnoreCase(responseContainer))
          responseProperty = new ArrayProperty(new RefProperty().asDefault(key));
        else if ("map".equalsIgnoreCase(responseContainer))
          responseProperty = new MapProperty(new RefProperty().asDefault(key));
        else responseProperty = new RefProperty().asDefault(key);
        operation.response(
            200,
            new Response()
                .description("successful operation")
                .schema(responseProperty)
                .headers(defaultResponseHeaders));
        swagger.model(key, models.get(key));
      }
      models = ModelConverters.getInstance().readAll(responseClass);
      for (String key : models.keySet()) {
        swagger.model(key, models.get(key));
      }
    }
  }

  operation.operationId(operationId);

  if (requestMapping.produces() != null) {
    for (String str : Arrays.asList(requestMapping.produces())) {
      if (!produces.contains(str)) {
        produces.add(str);
      }
    }
  }
  if (requestMapping.consumes() != null) {
    for (String str : Arrays.asList(requestMapping.consumes())) {
      if (!consumes.contains(str)) {
        consumes.add(str);
      }
    }
  }

  ApiResponses responseAnnotation = method.getAnnotation(ApiResponses.class);
  if (responseAnnotation != null) {
    updateApiResponse(operation, responseAnnotation);
  } else {
    ResponseStatus responseStatus = method.getAnnotation(ResponseStatus.class);
    if (responseStatus != null) {
      operation.response(
          responseStatus.value().value(), new Response().description(responseStatus.reason()));
    }
  }

  boolean isDeprecated = false;
  Deprecated annotation = method.getAnnotation(Deprecated.class);
  if (annotation != null) isDeprecated = true;

  boolean hidden = false;
  if (apiOperation != null) hidden = apiOperation.hidden();

  // process parameters
  Class[] parameterTypes = method.getParameterTypes();
  Type[] genericParameterTypes = method.getGenericParameterTypes();
  Annotation[][] paramAnnotations = method.getParameterAnnotations();
  for (int i = 0; i < parameterTypes.length; i++) {
    Type type = genericParameterTypes[i];
    List<Annotation> annotations = Arrays.asList(paramAnnotations[i]);
    List<Parameter> parameters = getParameters(type, annotations);

    for (Parameter parameter : parameters) {
      operation.parameter(parameter);
    }
  }

  if (operation.getResponses() == null) {
    operation.defaultResponse(new Response().description("successful operation"));
  }

  // process @ApiImplicitParams
  this.readImplicitParameters(method, operation);

  return operation;
}
private void runAThread(ThreadedEntityProcessorWrapper epw, EntityRow rows, String currProcess)
    throws Exception {
  currentEntityProcWrapper.set(epw);
  epw.threadedInit(context);
  initEntity();
  try {
    epw.init(rows);
    DocWrapper docWrapper = this.docWrapper;
    Context.CURRENT_CONTEXT.set(context);
    for (; ; ) {
      if (DocBuilder.this.stop.get()) break;
      try {
        Map<String, Object> arow = epw.nextRow();
        if (arow == null) {
          break;
        } else {
          importStatistics.rowsCount.incrementAndGet();
          if (docWrapper == null && entity.isDocRoot) {
            docWrapper = new DocWrapper();
            context.setDoc(docWrapper);
            DataConfig.Entity e = entity.parentEntity;
            for (EntityRow row = rows;
                row != null && e != null;
                row = row.tail, e = e.parentEntity) {
              addFields(e, docWrapper, row.row, epw.resolver);
            }
          }
          if (docWrapper != null) {
            handleSpecialCommands(arow, docWrapper);
            addFields(entity, docWrapper, arow, epw.resolver);
          }
          if (entity.entities != null) {
            EntityRow nextRow = new EntityRow(arow, rows, entity.name);
            for (DataConfig.Entity e : entity.entities) {
              epw.children.get(e).run(docWrapper, currProcess, nextRow);
            }
          }
        }
        if (entity.isDocRoot) {
          LOG.info("a row on docroot " + docWrapper);
          if (!docWrapper.isEmpty()) {
            LOG.info("adding a doc " + docWrapper);
            boolean result = writer.upload(docWrapper);
            docWrapper = null;
            if (result) {
              importStatistics.docCount.incrementAndGet();
            } else {
              importStatistics.failedDocCount.incrementAndGet();
            }
          }
        }
      } catch (DataImportHandlerException dihe) {
        exception = dihe;
        if (dihe.getErrCode() == SKIP_ROW || dihe.getErrCode() == SKIP) {
          importStatistics.skipDocCount.getAndIncrement();
          exception = null; // should not propagate up
          continue;
        }
        if (entity.isDocRoot) {
          if (dihe.getErrCode() == DataImportHandlerException.SKIP) {
            importStatistics.skipDocCount.getAndIncrement();
            exception = null; // should not propagate up
          } else {
            LOG.error(
                "Exception while processing: " + entity.name + " document : " + docWrapper, dihe);
          }
          if (dihe.getErrCode() == DataImportHandlerException.SEVERE) throw dihe;
        } else {
          // if this is not the docRoot, then the execution has happened in the same
          // thread, so propagate up; it will be handled at the docRoot
          entityEnded.set(true);
          throw dihe;
        }
        entityEnded.set(true);
      }
    }
  } finally {
    epw.destroy();
    currentEntityProcWrapper.remove();
    Context.CURRENT_CONTEXT.remove();
  }
}
public void assignToLeafTasks(LinkedList<TaskRequestEvent> taskRequests) {
  Collections.shuffle(taskRequests);
  LinkedList<TaskRequestEvent> remoteTaskRequests = new LinkedList<>();
  String queryMasterHostAndPort =
      context
          .getMasterContext()
          .getQueryMasterContext()
          .getWorkerContext()
          .getConnectionInfo()
          .getHostAndQMPort();

  TaskRequestEvent taskRequest;
  while (leafTasks.size() > 0 && (!taskRequests.isEmpty() || !remoteTaskRequests.isEmpty())) {
    int localAssign = 0;
    int rackAssign = 0;

    taskRequest = taskRequests.pollFirst();
    if (taskRequest == null) { // if there are only remote task requests
      taskRequest = remoteTaskRequests.pollFirst();
    }

    // Check whether this container is still alive.
    // If not, ignore the task request and stop the task runner.
    WorkerConnectionInfo connectionInfo =
        context.getMasterContext().getWorkerMap().get(taskRequest.getWorkerId());
    if (connectionInfo == null) continue;

    // get the hostname of the requested node
    String host = connectionInfo.getHost();

    // if no worker is matched to the hostname of the task request
    if (!leafTaskHostMapping.containsKey(host) && !taskRequests.isEmpty()) {
      String normalizedHost = NetUtils.normalizeHost(host);

      if (!leafTaskHostMapping.containsKey(normalizedHost)) {
        // This means one of the following cases:
        // * there are no blocks which reside in this node, or
        // * all blocks which reside in this node are consumed, and this task runner
        //   requests a remote task.
        // In this case, we transfer the task request to the remote task request list
        // and skip the rest of the loop body.
        remoteTaskRequests.add(taskRequest);
        continue;
      } else {
        host = normalizedHost;
      }
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "assignToLeafTasks: "
              + taskRequest.getExecutionBlockId()
              + ","
              + "worker="
              + connectionInfo.getHostAndPeerRpcPort());
    }

    //////////////////////////////////////////////////////////////////////
    // disk or host-local allocation
    //////////////////////////////////////////////////////////////////////
    TaskAttemptId attemptId = allocateLocalTask(host);

    if (attemptId == null) { // if a local task cannot be found
      HostVolumeMapping hostVolumeMapping = leafTaskHostMapping.get(host);

      // if other requests remain, move this one to the remote list for better locality
      if (!taskRequests.isEmpty()) {
        remoteTaskRequests.add(taskRequest);
        candidateWorkers.remove(connectionInfo.getId());
        continue;
      } else {
        if (hostVolumeMapping != null) {
          int nodes = context.getMasterContext().getWorkerMap().size();
          // this part controls the balancing of tail and remote task assignment per node
          int tailLimit = 1;
          if (remainingScheduledObjectNum() > 0 && nodes > 0) {
            tailLimit = Math.max(remainingScheduledObjectNum() / nodes, 1);
          }

          if (hostVolumeMapping.getRemoteConcurrency() >= tailLimit) {
            // remote task throttling per node
            continue;
          } else {
            // assign to a remote volume
            hostVolumeMapping.increaseConcurrency(HostVolumeMapping.REMOTE);
          }
        }
      }

      //////////////////////////////////////////////////////////////////////
      // rack-local allocation
      //////////////////////////////////////////////////////////////////////
      attemptId = allocateRackTask(host);

      //////////////////////////////////////////////////////////////////////
      // random node allocation
      //////////////////////////////////////////////////////////////////////
      if (attemptId == null && leafTaskNum() > 0) {
        synchronized (leafTasks) {
          attemptId = leafTasks.iterator().next();
          leafTasks.remove(attemptId);
        }
      }

      if (attemptId != null && hostVolumeMapping != null) {
        hostVolumeMapping.lastAssignedVolumeId.put(attemptId, HostVolumeMapping.REMOTE);
      }
      rackAssign++;
    } else {
      localAssign++;
    }

    if (attemptId != null) {
      Task task = stage.getTask(attemptId.getTaskId());
      TaskRequest taskAssign =
          new TaskRequestImpl(
              attemptId,
              new ArrayList<>(task.getAllFragments()),
              "",
              false,
              LogicalNodeSerializer.serialize(task.getLogicalPlan()),
              context.getMasterContext().getQueryContext(),
              stage.getDataChannel(),
              stage.getBlock().getEnforcer(),
              queryMasterHostAndPort);

      if (checkIfInterQuery(stage.getMasterPlan(), stage.getBlock())) {
        taskAssign.setInterQuery();
      }

      // TODO: send batch request
      BatchAllocationRequest.Builder requestProto = BatchAllocationRequest.newBuilder();
      requestProto.addTaskRequest(
          TaskAllocationProto.newBuilder()
              .setResource(taskRequest.getResponseProto().getResource())
              .setTaskRequest(taskAssign.getProto())
              .build());

      requestProto.setExecutionBlockId(attemptId.getTaskId().getExecutionBlockId().getProto());
      context
          .getMasterContext()
          .getEventHandler()
          .handle(new TaskAttemptAssignedEvent(attemptId, connectionInfo));

      InetSocketAddress addr = stage.getAssignedWorkerMap().get(connectionInfo.getId());
      if (addr == null)
        addr = new InetSocketAddress(connectionInfo.getHost(), connectionInfo.getPeerRpcPort());

      AsyncRpcClient tajoWorkerRpc = null;
      CallFuture<BatchAllocationResponse> callFuture = new CallFuture<>();
      totalAttempts++;
      try {
        tajoWorkerRpc =
            RpcClientManager.getInstance()
                .getClient(addr, TajoWorkerProtocol.class, true, rpcParams);

        TajoWorkerProtocol.TajoWorkerProtocolService tajoWorkerRpcClient =
            tajoWorkerRpc.getStub();
        tajoWorkerRpcClient.allocateTasks(
            callFuture.getController(), requestProto.build(), callFuture);

        BatchAllocationResponse responseProto =
            callFuture.get(RpcConstants.FUTURE_TIMEOUT_SECONDS_DEFAULT, TimeUnit.SECONDS);

        if (responseProto.getCancellationTaskCount() > 0) {
          for (TaskAllocationProto proto : responseProto.getCancellationTaskList()) {
            cancel(task.getAttempt(new TaskAttemptId(proto.getTaskRequest().getId())));
            cancellation++;
          }

          if (LOG.isDebugEnabled()) {
            LOG.debug(
                "Canceled requests: "
                    + responseProto.getCancellationTaskCount()
                    + " from "
                    + addr);
          }
          continue;
        }
      } catch (Exception e) {
        LOG.error(e);
      }

      scheduledObjectNum--;
      totalAssigned++;
      hostLocalAssigned += localAssign;
      rackLocalAssigned += rackAssign;

      if (rackAssign > 0) {
        LOG.info(
            String.format(
                "Assigned Local/Rack/Total: (%d/%d/%d), "
                    + "Attempted Cancel/Assign/Total: (%d/%d/%d), "
                    + "Locality: %.2f%%, Rack host: %s",
                hostLocalAssigned,
                rackLocalAssigned,
                totalAssigned,
                cancellation,
                totalAssigned,
                totalAttempts,
                ((double) hostLocalAssigned / (double) totalAssigned) * 100,
                host));
      }
    } else {
      throw new RuntimeException("Illegal state: no task attempt could be allocated");
    }
  }
}