/**
 * Set up the root fragment (which will run locally), and submit it for execution.
 *
 * @param rootFragment the root plan fragment
 * @param rootOperator the root operator of the fragment
 * @throws ExecutionSetupException
 */
private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator)
    throws ExecutionSetupException {
  @SuppressWarnings("resource")
  final FragmentContext rootContext =
      new FragmentContext(
          drillbitContext,
          rootFragment,
          queryContext,
          initiatingClient,
          drillbitContext.getFunctionImplementationRegistry());
  @SuppressWarnings("resource")
  final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
  rootContext.setBuffers(buffers);

  queryManager.addFragmentStatusTracker(rootFragment, true);

  rootRunner =
      new FragmentExecutor(
          rootContext,
          rootFragment,
          queryManager.newRootStatusHandler(rootContext, drillbitContext),
          rootOperator);
  final RootFragmentManager fragmentManager =
      new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);

  if (buffers.isDone()) {
    // if we don't have to wait for any incoming data, start the fragment runner.
    bee.addFragmentRunner(fragmentManager.getRunnable());
  } else {
    // if we do, record the fragment manager in the workBus.
    // TODO aren't we managing our own work? What does this do? It looks like this will never get run
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}
/** Initialization callback: verify wiring, load the node list, and schedule collection. */
protected void onInit() {
  Assert.state(m_queryMgr != null, "must set the queryManager property");
  Assert.state(m_linkdConfig != null, "must set the linkdConfig property");
  Assert.state(m_scheduler != null, "must set the scheduler property");
  Assert.state(m_eventForwarder != null, "must set the eventForwarder property");

  // FIXME: circular dependency
  m_queryMgr.setLinkd(this);

  m_activepackages = new ArrayList<String>();

  // initialize the set of IP addresses for which new-suspect events have already been sent
  m_newSuspectEventsIpAddr = new TreeSet<InetAddress>(new InetAddressComparator());
  m_newSuspectEventsIpAddr.add(addr("127.0.0.1"));
  m_newSuspectEventsIpAddr.add(addr("0.0.0.0"));

  try {
    m_nodes = m_queryMgr.getSnmpNodeList();
    m_queryMgr.updateDeletedNodes();
  } catch (SQLException e) {
    LogUtils.errorf(this, e, "SQL exception executing on database");
    throw new UndeclaredThrowableException(e);
  }

  Assert.notNull(m_nodes);
  scheduleCollection();

  LogUtils.infof(this, "init: LINKD CONFIGURATION INITIALIZED");
}
private void runPhysicalPlan(final PhysicalPlan plan) throws ExecutionSetupException {
  validatePlan(plan);
  setupSortMemoryAllocations(plan);
  acquireQuerySemaphore(plan);

  final QueryWorkUnit work = getQueryWorkUnit(plan);
  final List<PlanFragment> planFragments = work.getFragments();
  final PlanFragment rootPlanFragment = work.getRootFragment();
  assert queryId == rootPlanFragment.getHandle().getQueryId();

  drillbitContext
      .getWorkBus()
      .addFragmentStatusListener(queryId, queryManager.getFragmentStatusListener());
  drillbitContext
      .getClusterCoordinator()
      .addDrillbitStatusListener(queryManager.getDrillbitStatusListener());

  logger.debug("Submitting fragments to run.");

  // set up the root fragment first so we'll have incoming buffers available.
  setupRootFragment(rootPlanFragment, work.getRootOperator());

  setupNonRootFragments(planFragments);
  drillbitContext.getAllocator().resetFragmentLimits(); // TODO a global effect for this query?!?

  moveToState(QueryState.RUNNING, null);
  logger.debug("Fragments running.");
}
public boolean scheduleNodeCollection(int nodeid) {
  LinkableNode node = null;

  // the database has changed, so the package IP list must be reloaded
  m_linkdConfig.updatePackageIpListMap();

  // first of all, get the linkable node
  LogUtils.debugf(this, "scheduleNodeCollection: Loading node %d from database", nodeid);
  try {
    node = m_queryMgr.getSnmpNode(nodeid);
    if (node == null) {
      LogUtils.warnf(
          this,
          "scheduleNodeCollection: Failed to get linkable node from database with ID %d. Exiting",
          nodeid);
      return false;
    }
  } catch (final SQLException sqlE) {
    LogUtils.errorf(
        this,
        sqlE,
        "scheduleNodeCollection: SQL Exception while syncing node object with ID %d with database information.",
        nodeid);
    return false;
  }

  synchronized (m_nodes) {
    LogUtils.debugf(this, "adding node %s to the collection", node);
    m_nodes.add(node);
  }

  scheduleCollectionForNode(node);
  return true;
}
private void runSQL(final String sql) throws ExecutionSetupException {
  final DrillSqlWorker sqlWorker = new DrillSqlWorker(queryContext);
  final Pointer<String> textPlan = new Pointer<>();
  final PhysicalPlan plan = sqlWorker.getPlan(sql, textPlan);
  queryManager.setPlanText(textPlan.value);
  runPhysicalPlan(plan);
}
/**
 * Updates the node's entry in the node list and persists the collected data to the database.
 * Called by SnmpCollection once the collection has completed.
 *
 * @param snmpcoll the completed SNMP collection
 */
@Transactional
public void updateNodeSnmpCollection(final SnmpCollection snmpcoll) {
  LogUtils.debugf(
      this, "Updating SNMP collection for %s", InetAddressUtils.str(snmpcoll.getTarget()));

  LinkableNode node = removeNode(snmpcoll.getTarget());
  if (node == null) {
    LogUtils.errorf(
        this, "No node found for SNMP collection: %s; unscheduling!", snmpcoll.getInfo());
    m_scheduler.unschedule(snmpcoll);
    return;
  }

  try {
    node = m_queryMgr.storeSnmpCollection(node, snmpcoll);
  } catch (SQLException e) {
    LogUtils.errorf(
        this,
        e,
        "Failed to save SNMP collection/package to the database: %s/%s",
        snmpcoll.getPackageName(),
        snmpcoll.getInfo());
    return;
  }

  if (node != null) {
    synchronized (m_nodes) {
      m_nodes.add(node);
    }
  }
}
/**
 * Resume the query. Regardless of the current state, this method sends a resume signal to all
 * fragments. This method can be called multiple times.
 */
public void resume() {
  resume = true;
  // resume all pauses through the query context
  queryContext.getExecutionControls().unpauseAll();
  // resume all pauses through all fragment contexts
  queryManager.unpauseExecutingFragments(drillbitContext);
}
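// resume() above flips a flag and unblocks anything paused via the execution controls.
// Below is a minimal, self-contained sketch of a pause gate with the same observable
// semantics (an idempotent resume that wakes all waiters); it is an illustration only,
// not Drill's ExecutionControls implementation, and the names are hypothetical.
final class PauseGate {
  private boolean paused = true;

  /** Blocks the caller until resume() has been called at least once. */
  synchronized void awaitResume() throws InterruptedException {
    while (paused) {
      wait(); // releases the monitor until notified
    }
  }

  /** Safe to call multiple times; wakes every thread waiting at the gate. */
  synchronized void resume() {
    paused = false;
    notifyAll();
  }
}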
/**
 * Set up the root fragment (which will run locally), and submit it for execution.
 *
 * @param rootFragment the root plan fragment
 * @param rootOperator the root operator of the fragment
 * @throws ExecutionSetupException
 */
private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator)
    throws ExecutionSetupException {
  @SuppressWarnings("resource")
  final FragmentContext rootContext =
      new FragmentContext(
          drillbitContext,
          rootFragment,
          queryContext,
          initiatingClient,
          drillbitContext.getFunctionImplementationRegistry());
  @SuppressWarnings("resource")
  final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
  rootContext.setBuffers(buffers);

  queryManager.addFragmentStatusTracker(rootFragment, true);

  final ControlTunnel tunnel =
      drillbitContext.getController().getTunnel(queryContext.getCurrentEndpoint());
  final FragmentExecutor rootRunner =
      new FragmentExecutor(
          rootContext,
          rootFragment,
          new FragmentStatusReporter(rootContext, tunnel),
          rootOperator);
  final RootFragmentManager fragmentManager =
      new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);

  if (buffers.isDone()) {
    // if we don't have to wait for any incoming data, start the fragment runner.
    bee.addFragmentRunner(fragmentManager.getRunnable());
  } else {
    // if we do, record the fragment manager in the workBus.
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}
@Override
public void deleteExpiredKeys() {
  try {
    queryManager.deleteExpiredKeys();
  } catch (SQLException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
}
@DELETE @Path("{id}") @Produces(MediaType.APPLICATION_JSON) public Response getServer(@PathParam("id") String queryId) { if (log.isDebugEnabled()) { log.debug("Received cancel request for query [%s]", queryId); } queryManager.cancelQuery(queryId); return Response.status(Response.Status.ACCEPTED).build(); }
/**
 * Persists the discovery link data to the database. Called by DiscoveryLink once the discovery
 * run has completed.
 *
 * @param discover the completed discovery link collection
 */
void updateDiscoveryLinkCollection(final DiscoveryLink discover) {
  try {
    m_queryMgr.storeDiscoveryLink(discover);
  } catch (SQLException e) {
    LogUtils.errorf(
        this,
        e,
        "Failed to save discoverylink on database for package: %s",
        discover.getPackageName());
  }
}
public void cancel() {
  if (isFinished()) {
    return;
  }

  // cancel remote fragments.
  fragmentManager.cancel();

  QueryResult result =
      QueryResult.newBuilder()
          .setQueryState(QueryState.CANCELED)
          .setIsLastChunk(true)
          .setQueryId(queryId)
          .build();
  cleanupAndSendResult(result);
}
/**
 * Update the database when an interface is deleted.
 *
 * @param nodeid the node ID of the node
 * @param ipAddr the IP address of the interface
 * @param ifIndex the ifIndex of the interface
 */
void deleteInterface(int nodeid, String ipAddr, int ifIndex) {
  LogUtils.debugf(
      this,
      "deleteInterface: marking table entries as deleted for node %d with IP address %s and ifIndex %s",
      nodeid,
      ipAddr,
      (ifIndex > -1 ? "" + ifIndex : "N/A"));

  try {
    m_queryMgr.updateForInterface(nodeid, ipAddr, ifIndex, QueryManager.ACTION_DELETE);
  } catch (SQLException sqlE) {
    LogUtils.errorf(this, sqlE, "deleteInterface: SQL Exception while updating database.");
  }

  // the database has changed, so the package IP list must be reloaded
  m_linkdConfig.updatePackageIpListMap();
}
void deleteNode(int nodeid) {
  LogUtils.debugf(this, "deleteNode: deleting LinkableNode for node %d", nodeid);

  try {
    m_queryMgr.update(nodeid, QueryManager.ACTION_DELETE);
  } catch (SQLException sqlE) {
    LogUtils.errorf(
        this,
        sqlE,
        "deleteNode: SQL Exception while syncing node object with database information.");
  }

  LinkableNode node = removeNode(nodeid);
  if (node == null) {
    LogUtils.warnf(this, "deleteNode: node not found: %d", nodeid);
  } else {
    Collection<SnmpCollection> collections =
        getSnmpCollections(nodeid, node.getSnmpPrimaryIpAddr(), node.getSysoid());
    LogUtils.debugf(
        this,
        "deleteNode: fetched SnmpCollections from scratch, iterating over %d objects to unschedule them",
        collections.size());
    for (SnmpCollection collection : collections) {
      ReadyRunnable rr = getReadyRunnable(collection);
      if (rr == null) {
        LogUtils.warnf(this, "deleteNode: found null ReadyRunnable");
        return;
      } else {
        rr.unschedule();
      }
    }
  }

  // the database has changed, so the package IP list must be reloaded
  m_linkdConfig.updatePackageIpListMap();
}
void suspendNodeCollection(int nodeid) {
  LogUtils.debugf(
      this, "suspendNodeCollection: suspend collection LinkableNode for node %d", nodeid);

  try {
    m_queryMgr.update(nodeid, QueryManager.ACTION_UPTODATE);
  } catch (SQLException sqlE) {
    LogUtils.errorf(
        this,
        sqlE,
        "suspendNodeCollection: SQL Exception while syncing node object with database information.");
  }

  LinkableNode node = getNode(nodeid);
  if (node == null) {
    LogUtils.warnf(this, "suspendNodeCollection: node not found: %d", nodeid);
  } else {
    // fetch the collections, look up their ReadyRunnables, and suspend them
    Collection<SnmpCollection> collections =
        getSnmpCollections(nodeid, node.getSnmpPrimaryIpAddr(), node.getSysoid());
    LogUtils.debugf(
        this,
        "suspendNodeCollection: fetched SnmpCollections from scratch, iterating over %d objects to suspend them",
        collections.size());
    for (SnmpCollection collection : collections) {
      ReadyRunnable rr = getReadyRunnable(collection);
      if (rr == null) {
        LogUtils.warnf(this, "suspendNodeCollection: found null ReadyRunnable for node %d", nodeid);
        return;
      } else {
        rr.suspend();
      }
    }
  }
}
public boolean runSingleCollection(final int nodeId) {
  try {
    final LinkableNode node = m_queryMgr.getSnmpNode(nodeId);
    if (node == null) {
      // getSnmpNode() may return null (see scheduleNodeCollection); guard against it
      LogUtils.debugf(
          this, "runSingleCollection: no linkable node found in database with ID %d", nodeId);
      return false;
    }

    for (final SnmpCollection snmpColl :
        getSnmpCollections(nodeId, node.getSnmpPrimaryIpAddr(), node.getSysoid())) {
      snmpColl.setScheduler(m_scheduler);
      snmpColl.run();

      final DiscoveryLink link = getDiscoveryLink(snmpColl.getPackageName());
      link.setScheduler(m_scheduler);
      link.run();
    }
    return true;
  } catch (final SQLException e) {
    LogUtils.debugf(
        this, "runSingleCollection: unable to get linkable node from database with ID %d", nodeId);
  }
  return false;
}
/**
 * Initialization: read the configuration.
 *
 * @return true if the configuration was read successfully
 */
public boolean init() {
  // page size
  this.pageSize = JdomUtils.getAttributeIntValue(eleConfig, "pageSize", DEFAULT_PAGE_SIZE);

  // read the fetch-agent class name from the configuration and instantiate it
  this.agentClassName = JdomUtils.getAttributeValue(eleConfig, "agentClass", "");
  if (agentClassName.length() > 0) {
    try {
      this.agentClass = (IValueFetch) ClassUtils.newInstance(agentClassName);
    } catch (RuntimeException ex) {
      QueryManager.logError(
          logger,
          this.query,
          "Failed to instantiate result-fetch agent class (" + agentClassName + ")");
    }
  }
  if (agentClass == null) agentClass = this;

  // read whether to display results on first load
  this.firstShow = JdomUtils.getAttributeBooleanValue(eleConfig, "firstShow", true);

  // read the SQL configuration
  Element eleSql = eleConfig.getChild("sql");
  if (eleSql == null) {
    QueryManager.logError(logger, query, "No sql element configured in valuefetch");
    return false;
  }

  // read the cache configuration
  Element eleCache = eleConfig.getChild("cache");
  if (eleCache != null) {
    this.needCache = true;
    this.needRadom = JdomUtils.getAttributeBooleanValue(eleCache, "radom", false);
    this.cacheLimit = JdomUtils.getAttributeIntValue(eleCache, "radom", DEFAULT_CACHE_LIMIT);
  }

  // read the SQL statements
  this.sqlSelect = StringUtils.trimToEmpty(eleSql.getChildText("SqlSelect"));
  this.sqlStringFromWhere =
      new SqlString(StringUtils.trimToEmpty(eleSql.getChildText("SqlFromWhere")));
  this.sqlSum = StringUtils.trimToEmpty(eleSql.getChildTextTrim("SqlSum"));
  this.sqlOrder = StringUtils.trimToEmpty(eleSql.getChildTextTrim("SqlOrder"));

  // read the group definitions
  Element eleSqlGroup = eleSql.getChild("SqlGroup");
  if (eleSqlGroup != null) {
    drillURL = JdomUtils.getAttributeValue(eleSqlGroup, "drillURL");
    this.needTotalX = JdomUtils.getAttributeBooleanValue(eleSqlGroup, "needTotalX", true);
    this.needTotalY = JdomUtils.getAttributeBooleanValue(eleSqlGroup, "needTotalY", true);

    // by default, statistics are not run on the first display
    firstShow = false;
    this.pageSize = -1;

    groupList = new ArrayList();
    groupMap = new HashMap();
    groupListY = new ArrayList();
    groupListX = new ArrayList();
    groupListSelect = new ArrayList();

    Iterator iterator = eleSqlGroup.getChildren("Group").iterator();
    while (iterator.hasNext()) {
      Element eleGroup = (Element) iterator.next();
      String groupName = JdomUtils.getAttributeValue(eleGroup, "groupName");
      String groupLabel = JdomUtils.getAttributeValue(eleGroup, "groupLabel");
      String metadatasql = JdomUtils.getAttributeValue(eleGroup, "metadatasql");
      String macrovalue = JdomUtils.getAttributeValue(eleGroup, "macrovalue");
      String position = JdomUtils.getAttributeValue(eleGroup, "position");

      Group group = new Group(groupName, groupLabel, metadatasql, macrovalue, position);
      if (JdomUtils.getAttributeBooleanValue(eleGroup, "subTotal", false))
        group.needSubTotal = true;
      group.needDrill = JdomUtils.getAttributeBooleanValue(eleGroup, "drill", false);
      if (!group.metadatasql.equals("") && !group.metadataSqlString.needParse())
        group.metadata = JdbcUtils.queryForList(metadatasql);

      if (StringUtils.contains(position, "y")) groupListY.add(group);
      else if (StringUtils.contains(position, "x")) groupListX.add(group);
      else groupListSelect.add(group);
      groupMap.put(group.groupName, group);
    }
    // sort once, after all groups have been read
    Collections.sort(groupListY);
    Collections.sort(groupListX);
    Collections.sort(groupListSelect);
    groupList.addAll(groupListX);
    groupList.addAll(groupListY);

    // read the aggregate (GroupSelect) configuration
    ArrayList sqlGroupSelectList = new ArrayList();
    sqlGroupSelect = new MultiCheckbox();
    sqlGroupSelect.name = "sqlGroupSelect";
    Iterator iteratorSqlGroupSelect =
        eleSql.getChild("SqlGroupSelect").getChildren("GroupSelect").iterator();
    while (iteratorSqlGroupSelect.hasNext()) {
      Element eleGroupSelect = (Element) iteratorSqlGroupSelect.next();
      String label = JdomUtils.getAttributeValue(eleGroupSelect, "label");
      String value = JdomUtils.getAttributeValue(eleGroupSelect, "value");
      String addvalue = JdomUtils.getAttributeValue(eleGroupSelect, "addvalue");
      Checkbox groupSelect = new Checkbox("", label, value, addvalue, true);
      sqlGroupSelectList.add(groupSelect);
    }
    this.sqlGroupSelect.metadata = sqlGroupSelectList;

    // read the group relation definitions
    Element eleSqlGroupRelation = eleSql.getChild("SqlGroupRelation");
    groupRelationList = new ArrayList();
    if (eleSqlGroupRelation != null) {
      iterator = eleSqlGroupRelation.getChildren("GroupRelation").iterator();
      while (iterator.hasNext()) {
        Element eleGroupRelation = (Element) iterator.next();
        String groupName1 = JdomUtils.getAttributeValue(eleGroupRelation, "groupName1");
        String groupName2 = JdomUtils.getAttributeValue(eleGroupRelation, "groupName2");
        String relationsql = JdomUtils.getAttributeValue(eleGroupRelation, "relationsql");
        ArrayList relation = JdbcUtils.queryForList(relationsql);
        GroupRelation groupRelation = new GroupRelation(groupName1, groupName2, relation);
        groupRelationList.add(groupRelation);
      }
    }
  }

  if ((sqlSelect.length() == 0 && groupList == null)
      || sqlStringFromWhere.getSql().length() == 0) {
    QueryManager.logError(
        logger, query, "SqlSelect or SqlFromWhere is missing from the valuefetch sql configuration");
    return false;
  }

  needCount = JdomUtils.getAttributeBooleanValue(eleSql, "count", true);

  // read the macro-replacement configuration
  Element elementReplaces = eleSql.getChild("replaces");
  replaceMap = new HashMap();
  if (elementReplaces != null) {
    Iterator iterator = elementReplaces.getChildren().iterator();
    while (iterator.hasNext()) {
      Element elementReplace = (Element) iterator.next();
      String replacename = JdomUtils.getAttributeValue(elementReplace, "replacename", "");
      ArrayList paraReplaceList = new ArrayList();
      MacroReplace replace = null;
      if (elementReplace.getAttributeValue("replacevalue") == null) {
        // the configuration is held in child parareplace elements
        Iterator iteratorParaReplace = elementReplace.getChildren().iterator();
        while (iteratorParaReplace.hasNext()) {
          Element elementParaReplace = (Element) iteratorParaReplace.next();
          String conditionExpr =
              JdomUtils.getAttributeValue(elementParaReplace, "conditionExpr", "");
          String paraname = JdomUtils.getAttributeValue(elementParaReplace, "paraname", "");
          String paravalue = JdomUtils.getAttributeValue(elementParaReplace, "paravalue", "");
          String relation = JdomUtils.getAttributeValue(elementParaReplace, "relation", "!=");
          String replacevalue =
              JdomUtils.getAttributeValue(elementParaReplace, "replacevalue", "");
          replace = new MacroReplace(conditionExpr, paraname, paravalue, relation, replacevalue);
          paraReplaceList.add(replace);
        }
        replaceMap.put(replacename, paraReplaceList);
      } else {
        // the configuration is on the replace element itself
        String conditionExpr = JdomUtils.getAttributeValue(elementReplace, "conditionExpr", "");
        String paraname = JdomUtils.getAttributeValue(elementReplace, "paraname", "");
        String paravalue = JdomUtils.getAttributeValue(elementReplace, "paravalue", "");
        String relation = JdomUtils.getAttributeValue(elementReplace, "relation", "!=");
        String replacevalue = JdomUtils.getAttributeValue(elementReplace, "replacevalue", "");
        replace = new MacroReplace(conditionExpr, paraname, paravalue, relation, replacevalue);
        paraReplaceList.add(replace);
        replaceMap.put(replacename, paraReplaceList);
      }
    }
  }

  return true;
}
public void salva(View arg0) {
  Utility.log("salva INI");

  EditText etData = (EditText) findViewById(R.id.etData);
  EditText etRC = (EditText) findViewById(R.id.etRC);
  EditText etRS = (EditText) findViewById(R.id.etRS);

  data = etData.getText().toString();
  rc = etRC.getText().toString();
  rs = etRS.getText().toString();

  String[] badValue = {null};
  boolean procedi = true;
  procedi = Utility.checkDateField(data, badValue, getApplicationContext());
  rc = Utility.checkTextField(rc, badValue, "", -1);
  rs = Utility.checkTextField(rs, badValue, "", -1);
  consegnato = Utility.checkTextField(consegnato, badValue, "0", 1);

  Utility.log("--- data " + data);
  Utility.log("--- rc " + rc);
  Utility.log("--- rs " + rs);
  Utility.log("--- consegnato " + consegnato);

  if (procedi) {
    // create a new map of values, where column names are the keys
    ContentValues values = new ContentValues();
    values.put(LocalDB.Assessment.COLUMN_NAME_DATA, data);
    values.put(LocalDB.Assessment.COLUMN_NAME_RC, rc);
    values.put(LocalDB.Assessment.COLUMN_NAME_RS, rs);
    values.put(LocalDB.Assessment.COLUMN_NAME_CONSEGNATO, consegnato);

    Utility.log("idAssessment " + idTupla);
    if (this.cliente_fk != -1) {
      values.put(LocalDB.Assessment.COLUMN_NAME_CLIENTE_FK, this.cliente_fk);
    }
    if (this.visita_fk != -1) {
      values.put(LocalDB.Assessment.COLUMN_NAME_VISITA_FK, this.visita_fk);
    }
    Utility.log("cliente_fk " + cliente_fk);
    Utility.log("visita_fk " + visita_fk);

    if (idTupla == -1) {
      // insert a new row into the table
      QueryManager q = new QueryManager(getApplicationContext());
      q.openDB(true);
      long numRow = q.insertRow(LocalDB.Assessment.TABLE_NAME, values);
      q.closeDB();
      idTupla = (int) numRow;
    } else {
      // which row to update, based on the ID
      String selection = LocalDB.Cliente._ID + " = ?";
      String[] selectionArgs = {String.valueOf(idTupla)};
      QueryManager q = new QueryManager(getApplicationContext());
      q.openDB(true);
      q.updateRow(LocalDB.Assessment.TABLE_NAME, values, selection, selectionArgs);
      q.closeDB();
    }
    this.finish();
  }

  Utility.log("salva END");
}
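// The insert-or-update branch in salva() is a common Android pattern. Below is a minimal,
// self-contained sketch of the same "upsert by id" shape written directly against
// SQLiteDatabase rather than this app's QueryManager wrapper; the table name and the
// "_id" column here are assumptions for illustration, not names taken from the code above.
import android.content.ContentValues;
import android.database.sqlite.SQLiteDatabase;

public final class UpsertSketch {
  private UpsertSketch() {}

  /** Inserts a new row when rowId < 0, otherwise updates the existing row; returns the row id. */
  public static long upsert(SQLiteDatabase db, String table, long rowId, ContentValues values) {
    if (rowId < 0) {
      // insert() returns the id of the newly inserted row, or -1 on failure
      return db.insert(table, null, values);
    }
    db.update(table, values, "_id = ?", new String[] {String.valueOf(rowId)});
    return rowId;
  }
}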
@Before
public void setUp() throws Exception {
  m_assertLevel = Level.WARN;

  // System.setProperty("mock.logLevel", "DEBUG");
  // System.setProperty("mock.debug", "true");
  MockUtil.println("------------ Begin Test --------------------------");
  MockLogAppender.setupLogging();

  m_network = new MockNetwork();
  m_network.setCriticalService("ICMP");
  m_network.addNode(1, "Router");
  m_network.addInterface("192.168.1.1");
  m_network.addService("ICMP");
  m_network.addService("SMTP");
  m_network.addService("SNMP");
  m_network.addInterface("192.168.1.2");
  m_network.addService("ICMP");
  m_network.addService("SMTP");
  m_network.addNode(2, "Server");
  m_network.addInterface("192.168.1.3");
  m_network.addService("ICMP");
  m_network.addService("HTTP");
  m_network.addService("SMTP");
  m_network.addService("SNMP");
  m_network.addNode(3, "Firewall");
  m_network.addInterface("192.168.1.4");
  m_network.addService("SMTP");
  m_network.addService("HTTP");
  m_network.addInterface("192.168.1.5");
  m_network.addService("SMTP");
  m_network.addService("HTTP");
  m_network.addNode(4, "DownNode");
  m_network.addInterface("192.168.1.6");
  m_network.addService("SNMP");
  // m_network.addInterface("fe80:0000:0000:0000:0231:f982:0123:4567");
  // m_network.addService("SNMP");

  m_db = new MockDatabase();
  m_db.populate(m_network);
  DataSourceFactory.setInstance(m_db);

  // DemandPollDao demandPollDao = new DemandPollDaoHibernate(m_db);
  // demandPollDao.setAllocateIdStmt(m_db.getNextSequenceValStatement("demandPollNxtId"));
  // m_demandPollDao = demandPollDao;

  m_pollerConfig = new MockPollerConfig(m_network);
  m_pollerConfig.setNextOutageIdSql(m_db.getNextOutageIdStatement());
  m_pollerConfig.setNodeOutageProcessingEnabled(true);
  m_pollerConfig.setCriticalService("ICMP");
  m_pollerConfig.addPackage("TestPackage");
  m_pollerConfig.addDowntime(1000L, 0L, -1L, false);
  m_pollerConfig.setDefaultPollInterval(1000L);
  m_pollerConfig.populatePackage(m_network);
  m_pollerConfig.addPackage("TestPkg2");
  m_pollerConfig.addDowntime(1000L, 0L, -1L, false);
  m_pollerConfig.setDefaultPollInterval(2000L);
  m_pollerConfig.addService(m_network.getService(2, "192.168.1.3", "HTTP"));

  m_anticipator = new EventAnticipator();
  m_outageAnticipator = new OutageAnticipator(m_db);

  m_eventMgr = new MockEventIpcManager();
  m_eventMgr.setEventWriter(m_db);
  m_eventMgr.setEventAnticipator(m_anticipator);
  m_eventMgr.addEventListener(m_outageAnticipator);
  m_eventMgr.setSynchronous(false);

  QueryManager queryManager = new DefaultQueryManager();
  queryManager.setDataSource(m_db);

  DefaultPollContext pollContext = new DefaultPollContext();
  pollContext.setEventManager(m_eventMgr);
  pollContext.setLocalHostName("localhost");
  pollContext.setName("Test.DefaultPollContext");
  pollContext.setPollerConfig(m_pollerConfig);
  pollContext.setQueryManager(queryManager);

  PollableNetwork network = new PollableNetwork(pollContext);

  m_poller = new Poller();
  m_poller.setDataSource(m_db);
  m_poller.setEventManager(m_eventMgr);
  m_poller.setNetwork(network);
  m_poller.setQueryManager(queryManager);
  m_poller.setPollerConfig(m_pollerConfig);
  m_poller.setPollOutagesConfig(m_pollerConfig);

  MockOutageConfig config = new MockOutageConfig();
  config.setGetNextOutageID(m_db.getNextOutageIdStatement());

  RrdUtils.setStrategy(new NullRrdStrategy());

  // m_outageMgr = new OutageManager();
  // m_outageMgr.setEventMgr(m_eventMgr);
  // m_outageMgr.setOutageMgrConfig(config);
  // m_outageMgr.setDbConnectionFactory(m_db);
}
@Override
public void run() {
  procThread = Thread.currentThread();
  procThread.setName(procThread.getName() + "-ExporterTask-gen" + (++genNo) + "-lane" + laneNo);

  AuxInfo auxInf = null;
  if (myEqFactory != null) auxInf = new AuxInfoImpl(myEqFactory);

  int objCount = 0;

  try {
    StringBuilder sb = new StringBuilder();

    while (true) {
      if (needGroupLoop) {
        Collection<BioSampleGroup> grps = null;

        // fetch the next chunk of groups, retrying on transient persistence errors
        int restart = 0;
        while (true) {
          try {
            grps = sgQM.getGroups();
            break;
          } catch (PersistenceException e) {
            sgQM.close();
            restart++;
            stat.incRecoverAttempt();
            if (restart > MaxErrorRecoverAttempts) throw e;
          }
        }

        if (grps.size() == 0) {
          log.debug("({}) No more groups to process", Thread.currentThread().getName());
          needGroupLoop = false;
        }

        for (BioSampleGroup grp : grps) {
          if (stopFlag.get()) {
            log.debug("({}) Stop flag set. Sending FINISH message", Thread.currentThread().getName());
            putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_FINISH, this));
            return;
          }

          if (!stat.addGroup(grp.getId())) continue;

          objCount++;

          // System.out.printf("=MSI (L%d-G%d-N%d) %s Processing group %s %n",
          //     laneNo, genNo, msiCount, msi.getAcc(), g.getAcc());
          stat.incGroupCounter();

          boolean grpPub = AbstractXMLFormatter.isGroupPublic(grp, stat.getNowDate());
          if (grpPub) stat.incGroupPublicCounter();

          for (FormattingTask ft : tasks) {
            if (ft.getGroupQueue() == null) continue;
            if (!ft.confirmOutput()) continue;

            restart = 0;
            while (true) {
              try {
                sb.setLength(0);
                ft.getFormatter().exportGroup(grp, auxInf, sb, false);
                break;
              } catch (PersistenceException e) {
                restart++;
                stat.incRecoverAttempt();
                if (restart > MaxErrorRecoverAttempts) throw e;
              }
            }

            putIntoQueue(ft.getGroupQueue(), sb.toString());
          }

          if (hasGroupedSmp || maxObjsPerThr <= 0 || maxObjsPerThr - objCount > sgQM.getChunkSize()) {
            for (BioSample s : grp.getSamples()) {
              if (!hasGroupedSmp
                  && maxObjsPerThr > 0
                  && maxObjsPerThr - objCount <= sgQM.getChunkSize()) break;

              if (!stat.addSample(s.getId())) continue;

              objCount++;
              countSample(s);

              for (FormattingTask ft : tasks) {
                if (ft.getSampleQueue() == null) continue;
                if (!ft.confirmOutput()) continue;

                restart = 0;
                while (true) {
                  try {
                    sb.setLength(0);
                    ft.getFormatter().exportSample(s, auxInf, sb, false);
                    break;
                  } catch (PersistenceException e) {
                    restart++;
                    stat.incRecoverAttempt();
                    if (restart > MaxErrorRecoverAttempts) throw e;
                  }
                }

                putIntoQueue(ft.getSampleQueue(), sb.toString());
              }
            }
          }

          if (stopFlag.get()) {
            log.debug("({}) Stop flag set. Sending FINISH message", Thread.currentThread().getName());
            putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_FINISH, this));
            return;
          }
        }
      }

      if (hasUngroupedSmp) {
        Collection<BioSample> smpls = null;

        int restart = 0;
        while (true) {
          try {
            smpls = sgQM.getSamples();
            break;
          } catch (PersistenceException e) {
            sgQM.close();
            restart++;
            stat.incRecoverAttempt();
            if (restart > MaxErrorRecoverAttempts) throw e;
          }
        }

        if (smpls.size() == 0 && !needGroupLoop) {
          log.debug("({}) No more data to process", Thread.currentThread().getName());
          break;
        }

        for (BioSample s : smpls) {
          if (!stat.addSample(s.getId())) continue;

          objCount++;

          if (stopFlag.get()) {
            log.debug("({}) Stop flag set. Sending FINISH message", Thread.currentThread().getName());
            putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_FINISH, this));
            return;
          }

          countSample(s);

          for (FormattingTask ft : tasks) {
            if (ft.getSampleQueue() == null || ft.isGroupedSamplesOnly()) continue;
            if (!ft.confirmOutput()) continue;

            restart = 0;
            while (true) {
              try {
                sb.setLength(0);
                ft.getFormatter().exportSample(s, auxInf, sb, false);
                break;
              } catch (PersistenceException e) {
                restart++;
                stat.incRecoverAttempt();
                if (restart > MaxErrorRecoverAttempts) throw e;
              }
            }

            putIntoQueue(ft.getSampleQueue(), sb.toString());
          }
        }
      } else if (!needGroupLoop) {
        log.debug("({}) No more data to process", Thread.currentThread().getName());
        break;
      }

      boolean needMoreData = false;
      for (FormattingTask ft : tasks) {
        if (ft.confirmOutput()) {
          needMoreData = true;
          break;
        }
      }

      if (!needMoreData) {
        log.debug(
            "({}) Output tasks don't need more data. Breaking loop",
            Thread.currentThread().getName());
        break;
      }

      if (maxObjsPerThr > 0 && objCount >= maxObjsPerThr) {
        log.debug(
            "({}) Thread TTL expired. Processed {} objects. Sending TTL message",
            Thread.currentThread().getName(),
            objCount);

        if (auxInf != null) auxInf.destroy();
        auxInf = null;

        sgQM.close();
        putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_TTL, this));
        return;
      }

      if (auxInf != null) auxInf.clear();
    }

    log.debug("({}) Thread terminating. Sending FINISH message", Thread.currentThread().getName());
    putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_FINISH, this));
  } catch (Throwable e) {
    e.printStackTrace();
    putIntoQueue(controlQueue, new ControlMessage(Type.PROCESS_ERROR, this, e));
  } finally {
    if (auxInf != null) auxInf.destroy();
    sgQM.close();
  }
}
@Override
public boolean deleteObject(Object object) {
  Species g = (Species) object;
  return queryManager.deleteObject(g);
}
// Methods common to all models; each delegates to the corresponding QueryManager method.
@Override
public boolean insertObject(Object object) {
  Species g = (Species) object;
  return queryManager.saveObject(g);
}
public List<Species> searchAllSpecies() {
  return queryManager.searchSpecie();
}
// returns a list of species for autocomplete
public List<Species> searchAllSpeciesByName(String name) {
  return queryManager.searchSpecie(name);
}
private void runPhysicalPlan(PhysicalPlan plan) {
  if (plan.getProperties().resultMode != ResultMode.EXEC) {
    fail(
        String.format(
            "Failure running plan. You requested a result mode of %s, but a physical plan can only be output as EXEC",
            plan.getProperties().resultMode),
        new Exception());
  }

  PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
  MakeFragmentsVisitor makeFragmentsVisitor = new MakeFragmentsVisitor();

  Fragment rootFragment;
  try {
    rootFragment = rootOperator.accept(makeFragmentsVisitor, null);
  } catch (FragmentSetupException e) {
    fail("Failure while fragmenting query.", e);
    return;
  }

  PlanningSet planningSet = StatsCollector.collectStats(rootFragment);
  SimpleParallelizer parallelizer = new SimpleParallelizer();

  try {
    QueryWorkUnit work =
        parallelizer.getFragments(
            context.getCurrentEndpoint(),
            queryId,
            context.getActiveEndpoints(),
            context.getPlanReader(),
            rootFragment,
            planningSet,
            context.getConfig().getInt(ExecConstants.GLOBAL_MAX_WIDTH),
            context.getConfig().getInt(ExecConstants.MAX_WIDTH_PER_ENDPOINT));

    this.context
        .getWorkBus()
        .setFragmentStatusListener(
            work.getRootFragment().getHandle().getQueryId(), fragmentManager);

    List<PlanFragment> leafFragments = Lists.newArrayList();
    List<PlanFragment> intermediateFragments = Lists.newArrayList();

    // store fragments in the distributed grid.
    logger.debug("Storing fragments");
    for (PlanFragment f : work.getFragments()) {
      // store all fragments in the grid since they are part of the handshake.
      context.getCache().storeFragment(f);
      if (f.getLeafFragment()) {
        leafFragments.add(f);
      } else {
        intermediateFragments.add(f);
      }
    }
    logger.debug("Fragments stored.");

    logger.debug("Submitting fragments to run.");
    fragmentManager.runFragments(
        bee,
        work.getRootFragment(),
        work.getRootOperator(),
        initiatingClient,
        leafFragments,
        intermediateFragments);
    logger.debug("Fragments running.");
  } catch (ExecutionSetupException | RpcException e) {
    fail("Failure while setting up query.", e);
  }
}
/**
 * Sets the query manager and wires it back to this Linkd instance.
 *
 * @param queryMgr a {@link org.opennms.netmgt.linkd.QueryManager} object.
 */
public void setQueryManager(QueryManager queryMgr) {
  m_queryMgr = queryMgr;
  // TODO: circular dependency; refactor so this can be set in Spring
  queryMgr.setLinkd(this);
}
/**
 * Set up the non-root fragments for execution. Some may be local, and some may be remote.
 * Messages are sent immediately, so they may start returning data even before we complete this.
 *
 * @param fragments the fragments
 * @throws ForemanException
 */
private void setupNonRootFragments(final Collection<PlanFragment> fragments)
    throws ForemanException {
  /*
   * We will send a single message to each endpoint, regardless of how many fragments will be
   * executed there. We need to start up the intermediate fragments first so that they will be
   * ready once the leaf fragments start producing data. To satisfy both of these, we will
   * make a pass through the fragments and put them into these two maps according to their
   * leaf/intermediate state, as well as their target drillbit.
   */
  final Multimap<DrillbitEndpoint, PlanFragment> leafFragmentMap = ArrayListMultimap.create();
  final Multimap<DrillbitEndpoint, PlanFragment> intFragmentMap = ArrayListMultimap.create();

  // record all fragments for status purposes.
  for (final PlanFragment planFragment : fragments) {
    logger.trace(
        "Tracking intermediate remote node {} with data {}",
        planFragment.getAssignment(),
        planFragment.getFragmentJson());
    queryManager.addFragmentStatusTracker(planFragment, false);
    if (planFragment.getLeafFragment()) {
      leafFragmentMap.put(planFragment.getAssignment(), planFragment);
    } else {
      intFragmentMap.put(planFragment.getAssignment(), planFragment);
    }
  }

  /*
   * We need to wait for the intermediates to be sent so that they'll be set up by the time
   * the leaves start producing data. We'll use this latch to wait for the responses.
   *
   * However, in order not to hang the process if any of the RPC requests fails, we always
   * count down (see FragmentSubmitFailures), but we count the number of failures so that we'll
   * know if any submissions did fail.
   */
  final int numIntFragments = intFragmentMap.keySet().size();
  final ExtendedLatch endpointLatch = new ExtendedLatch(numIntFragments);
  final FragmentSubmitFailures fragmentSubmitFailures = new FragmentSubmitFailures();

  // send remote intermediate fragments
  for (final DrillbitEndpoint ep : intFragmentMap.keySet()) {
    sendRemoteFragments(ep, intFragmentMap.get(ep), endpointLatch, fragmentSubmitFailures);
  }

  final long timeout = RPC_WAIT_IN_MSECS_PER_FRAGMENT * numIntFragments;
  if (numIntFragments > 0 && !endpointLatch.awaitUninterruptibly(timeout)) {
    long numberRemaining = endpointLatch.getCount();
    throw UserException.connectionError()
        .message(
            "Exceeded timeout (%d) while waiting to send intermediate work fragments to remote nodes. "
                + "Sent %d and only heard response back from %d nodes.",
            timeout, numIntFragments, numIntFragments - numberRemaining)
        .build(logger);
  }

  // if any of the intermediate fragment submissions failed, fail the query
  final List<FragmentSubmitFailures.SubmissionException> submissionExceptions =
      fragmentSubmitFailures.submissionExceptions;
  if (submissionExceptions.size() > 0) {
    Set<DrillbitEndpoint> endpoints = Sets.newHashSet();
    StringBuilder sb = new StringBuilder();
    boolean first = true;

    for (FragmentSubmitFailures.SubmissionException e :
        fragmentSubmitFailures.submissionExceptions) {
      DrillbitEndpoint endpoint = e.drillbitEndpoint;
      if (endpoints.add(endpoint)) {
        if (first) {
          first = false;
        } else {
          sb.append(", ");
        }
        sb.append(endpoint.getAddress());
      }
    }

    throw UserException.connectionError(submissionExceptions.get(0).rpcException)
        .message("Error setting up remote intermediate fragment execution")
        .addContext("Nodes with failures", sb.toString())
        .build(logger);
  }

  injector.injectChecked(
      queryContext.getExecutionControls(), "send-fragments", ForemanException.class);

  /*
   * Send the remote (leaf) fragments; we don't wait for these. Any problems will come in through
   * the regular sendListener event delivery.
   */
  for (final DrillbitEndpoint ep : leafFragmentMap.keySet()) {
    sendRemoteFragments(ep, leafFragmentMap.get(ep), null, null);
  }
}
/**
 * Called by the execution pool to do query setup, and kick off remote execution.
 *
 * <p>Note that completion of this function is not the end of the Foreman's role in the query's
 * lifecycle.
 */
@Override
public void run() {
  // rename the thread we're using for debugging purposes
  final Thread currentThread = Thread.currentThread();
  final String originalName = currentThread.getName();
  currentThread.setName(QueryIdHelper.getQueryId(queryId) + ":foreman");

  // track how long the query takes
  queryManager.markStartTime();

  try {
    injector.injectChecked(
        queryContext.getExecutionControls(), "run-try-beginning", ForemanException.class);
    queryText = queryRequest.getPlan();

    // convert a run query request into action
    switch (queryRequest.getType()) {
      case LOGICAL:
        parseAndRunLogicalPlan(queryRequest.getPlan());
        break;
      case PHYSICAL:
        parseAndRunPhysicalPlan(queryRequest.getPlan());
        break;
      case SQL:
        runSQL(queryRequest.getPlan());
        break;
      default:
        throw new IllegalStateException();
    }
    injector.injectChecked(
        queryContext.getExecutionControls(), "run-try-end", ForemanException.class);
  } catch (final OutOfMemoryException | OutOfMemoryRuntimeException e) {
    moveToState(QueryState.FAILED, UserException.memoryError(e).build(logger));
  } catch (final ForemanException e) {
    moveToState(QueryState.FAILED, e);
  } catch (AssertionError | Exception ex) {
    moveToState(
        QueryState.FAILED,
        new ForemanException(
            "Unexpected exception during fragment initialization: " + ex.getMessage(), ex));
  } catch (final OutOfMemoryError e) {
    if ("Direct buffer memory".equals(e.getMessage())) {
      moveToState(
          QueryState.FAILED,
          UserException.resourceError(e)
              .message("One or more nodes ran out of memory while executing the query.")
              .build(logger));
    } else {
      /*
       * FragmentExecutors use a DrillbitStatusListener to watch out for the death of their query's
       * Foreman. So, if we die here, they should get notified about that, and cancel themselves;
       * we don't have to attempt to notify them, which might not work under these conditions.
       */
      System.out.println("Node ran out of heap memory, exiting.");
      e.printStackTrace();
      System.out.flush();
      System.exit(-1);
    }
  } finally {
    /*
     * Begin accepting external events.
     *
     * Doing this here in the finally clause will guarantee that it occurs. Otherwise, if there
     * is an exception anywhere during setup, it wouldn't occur, and any events that are generated
     * as a result of any partial setup that was done (such as the FragmentSubmitListener,
     * the ResponseSendListener, or an external call to cancel()), will hang the thread that makes
     * the event delivery call.
     *
     * If we do throw an exception during setup, and have already moved to QueryState.FAILED, we
     * just need to make sure that we can't make things any worse as those events are delivered,
     * but allow any necessary remaining cleanup to proceed.
     *
     * Note that cancellations cannot be simulated before this point, i.e. pauses can be injected,
     * because Foreman would wait on the cancelling thread to signal a resume and the cancelling
     * thread would wait on the Foreman to accept events.
     */
    acceptExternalEvents.countDown();

    // If we received the resume signal before the fragments were set up, the first call did not
    // actually resume the fragments. Since setup is now done, all fragments must have been
    // delivered to the remote nodes, so we can resume now.
    if (resume) {
      resume();
    }

    injector.injectPause(queryContext.getExecutionControls(), "foreman-ready", logger);

    // restore the thread's original name
    currentThread.setName(originalName);
  }

  /*
   * Note that despite the run() completing, the Foreman continues to exist, and receives
   * events (indirectly, through the QueryManager's use of stateListener), about fragment
   * completions. It won't go away until everything is completed, failed, or cancelled.
   */
}
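// run() above renames the worker thread to include the query id and restores the original
// name in the finally block, so thread dumps and stack traces identify the query being
// planned. The same pattern in isolation (a minimal sketch; the names are illustrative):
final class NamedWork {
  private NamedWork() {}

  /** Runs work under a temporary thread name, restoring the original name afterwards. */
  static void runAs(String name, Runnable work) {
    final Thread t = Thread.currentThread();
    final String original = t.getName();
    t.setName(name);
    try {
      work.run();
    } finally {
      t.setName(original); // restore even if work throws
    }
  }
}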
private void recordNewState(final QueryState newState) {
  state = newState;
  queryManager.updateEphemeralState(newState);
}