Example #1
    /**
     * Parse the parameters of a connection into a CoreNLP properties file that can be passed into
     * {@link StanfordCoreNLP}, and used in the I/O stages.
     *
     * @param httpExchange The http exchange; effectively, the request information.
     * @return A {@link Properties} object corresponding to a combination of default and passed
     *     properties.
     * @throws UnsupportedEncodingException Thrown if we could not decode the key/value pairs with
     *     UTF-8.
     */
    private Properties getProperties(HttpExchange httpExchange)
        throws UnsupportedEncodingException {
      // Load the default properties
      Properties props = new Properties();
      defaultProps
          .entrySet()
          .stream()
          .forEach(
              entry -> props.setProperty(entry.getKey().toString(), entry.getValue().toString()));

      // Try to get more properties from query string.
      Map<String, String> urlParams = getURLParams(httpExchange.getRequestURI());
      if (urlParams.containsKey("properties")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("properties"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      } else if (urlParams.containsKey("props")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("properties"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      }

      // Make sure the properties compile
      props.setProperty(
          "annotators",
          StanfordCoreNLP.ensurePrerequisiteAnnotators(
              props.getProperty("annotators").split("[, \t]+")));

      return props;
    }
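A request typically supplies these overrides URL-encoded in the properties (or props) query parameter. A minimal client-side sketch of building such a URL, assuming the server listens on localhost:9000 and that StringUtils.decodeMap accepts the JSON-like map syntax shown here (both are assumptions, not taken from the snippet above):

import java.net.URLEncoder;

public class PropertiesQueryExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical overrides; the handler above decodes this value and layers it
    // on top of the default properties before fixing up the annotator list.
    String overrides = "{\"annotators\": \"tokenize,ssplit,pos\", \"outputFormat\": \"json\"}";
    String url = "http://localhost:9000/?properties=" + URLEncoder.encode(overrides, "UTF-8");
    System.out.println(url);
  }
}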
Example #2
 @Override
 public boolean isStateTransferInProgressForKey(Object key) {
   if (configuration.clustering().cacheMode().isInvalidation()) {
     return false;
   }
   // todo [anistor] also return true for keys to be removed (now we report only keys to be added)
   synchronized (this) {
     return cacheTopology != null && transfersBySegment.containsKey(getSegment(key));
   }
 }
Example #3
  private void fixTimeLimit() {
    if (timeLimitFuture != null) timeLimitFuture.cancel(true);

    if (running && limits.containsKey(TIMER_COUNTER)) {
      long delay = limits.get(TIMER_COUNTER) * 1000 - time;
      if (delay > 0) {
        timeLimitFuture = scheduler.schedule(new TimeLimitTask(), delay, TimeUnit.MILLISECONDS);
      }
    }
  }
Example #4
  static void removeFromLayers(Shape shape) {
    if (!layerOf.containsKey(shape)) return;

    int oldLayer = layerOf.get(shape);
    layerContents.get(oldLayer).remove(shape);
    if (layerContents.get(oldLayer).isEmpty()) {
      layerContents.remove(oldLayer);
      layers.remove((Integer) oldLayer);
    }
    layerOf.remove(shape);
  }
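To make the invariant explicit (layerOf maps each shape to its layer, layerContents groups shapes per layer, and layers lists the indices of non-empty layers), here is a hypothetical counterpart that registers a shape; the collection type and the layer ordering are assumptions, not part of the original class:

  // Hypothetical helper, not in the original code: add a shape to a layer while keeping
  // layerOf, layerContents and layers consistent. Assumes layerContents values are HashSets
  // and that any required ordering of the layers list is handled elsewhere.
  static void addToLayers(Shape shape, int layer) {
    removeFromLayers(shape); // a shape belongs to at most one layer
    layerOf.put(shape, layer);
    layerContents.computeIfAbsent(layer, k -> new HashSet<>()).add(shape);
    if (!layers.contains(layer)) {
      layers.add(layer);
    }
  }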
Example #5
  static {
    threadPools.put(
        Stage.TRANSCODER,
        new ThreadPoolExecutor(
            3,
            3,
            5 * 60,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactory() {
              public Thread newThread(Runnable r) {
                return ThreadPools.newThread(
                    r, "TranscoderThread-" + Stage.TRANSCODER + "-" + (transSeq++));
              }
            }));
    threadPools.put(
        Stage.RECOGNIZER,
        new ThreadPoolExecutor(
            3,
            3,
            5 * 60,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactory() {
              public Thread newThread(Runnable r) {
                return ThreadPools.newThread(
                    r, "TranscoderThread-" + Stage.RECOGNIZER + "-" + (transSeq++));
              }
            }));

    // register them too
    ThreadPools.getThreadPools()
        .put(Executors.class.getName() + "." + Stage.TRANSCODER, threadPools.get(Stage.TRANSCODER));
    ThreadPools.getThreadPools()
        .put(Executors.class.getName() + "." + Stage.RECOGNIZER, threadPools.get(Stage.RECOGNIZER));

    // fill the rest of the map too, so we don't have to think about it any more later on.
    for (Stage s : Stage.values()) {
      if (!threadPools.containsKey(s)) {
        threadPools.put(s, ThreadPools.jobsExecutor);
      }
    }
    // default configuration, 5 + 1 executors.
    for (int i = 0; i < 5; i++) {
      executorsMap.put(new CommandExecutor.Method(), Stage.TRANSCODER);
    }
    executorsMap.put(new CommandExecutor.Method(), Stage.RECOGNIZER);
    readConfiguration();
  }
Example #6
  void doStatusCheck() {
    long now = System.currentTimeMillis();

    Set<InetAddress> eps = endpointStateMap_.keySet();
    for (InetAddress endpoint : eps) {
      if (endpoint.equals(localEndpoint_)) continue;

      FailureDetector.instance.interpret(endpoint);
      EndpointState epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        long duration = now - epState.getUpdateTimestamp();

        if (StorageService.instance.getTokenMetadata().isMember(endpoint))
          epState.setHasToken(true);
        // check if this is a fat client. fat clients are removed automatically from
        // gossip after FatClientTimeout
        if (!epState.getHasToken()
            && !epState.isAlive()
            && !justRemovedEndpoints_.containsKey(endpoint)
            && (duration > FatClientTimeout_)) {
          logger_.info(
              "FatClient "
                  + endpoint
                  + " has been silent for "
                  + FatClientTimeout_
                  + "ms, removing from gossip");
          removeEndpoint(
              endpoint); // will put it in justRemovedEndpoints to respect quarantine delay
          evictFromMembership(endpoint); // can get rid of the state immediately
        }

        if (!epState.isAlive() && (duration > aVeryLongTime_)) {
          evictFromMembership(endpoint);
        }
      }
    }

    if (!justRemovedEndpoints_.isEmpty()) {
      Map<InetAddress, Long> copy = new HashMap<InetAddress, Long>(justRemovedEndpoints_);
      for (Map.Entry<InetAddress, Long> entry : copy.entrySet()) {
        if ((now - entry.getValue()) > QUARANTINE_DELAY) {
          if (logger_.isDebugEnabled())
            logger_.debug(
                QUARANTINE_DELAY + " elapsed, " + entry.getKey() + " gossip quarantine over");
          justRemovedEndpoints_.remove(entry.getKey());
        }
      }
    }
  }
Example #7
  private long checkPlatformPackCompliance(
      List<CmsCIRelation> designPlatRels, CmsCI env, String nsPath, String userId) {

    List<CmsCIRelation> manifestPlatRels =
        cmProcessor.getFromCIRelations(
            env.getCiId(), MANIFEST_COMPOSED_OF, null, MANIFEST_PLATFORM);

    Map<String, String> manifestPlatPacks = new HashMap<String, String>(manifestPlatRels.size());
    for (CmsCIRelation manifestRel : manifestPlatRels) {
      CmsCI plat = manifestRel.getToCi();
      String key = plat.getCiName() + ":" + plat.getAttribute("major_version").getDjValue();
      String value =
          plat.getAttribute("source").getDjValue()
              + ":"
              + plat.getAttribute("pack").getDjValue()
              + ":"
              + plat.getAttribute("version").getDjValue();
      manifestPlatPacks.put(key, value);
    }

    long newReleaseId = 0;

    for (CmsCIRelation designRel : designPlatRels) {
      CmsCI dPlat = designRel.getToCi();
      String key = dPlat.getCiName() + ":" + dPlat.getAttribute("major_version").getDjValue();
      String value =
          dPlat.getAttribute("source").getDjValue()
              + ":"
              + dPlat.getAttribute("pack").getDjValue()
              + ":"
              + dPlat.getAttribute("version").getDjValue();
      if (manifestPlatPacks.containsKey(key) && !value.equals(manifestPlatPacks.get(key))) {
        String platNsPath =
            nsPath
                + "/"
                + dPlat.getCiName()
                + "/"
                + dPlat.getAttribute("major_version").getDfValue();
        List<CmsRfcCI> mPlats =
            cmRfcMrgProcessor.getDfDjCi(platNsPath, MANIFEST_PLATFORM, dPlat.getCiName(), "dj");
        if (mPlats.size() > 0) {
          newReleaseId = manifestRfcProcessor.deleteManifestPlatform(mPlats.get(0), userId);
        }
      }
    }
    return newReleaseId;
  }
Example #8
 @Override
 public void handle(HttpExchange httpExchange) throws IOException {
   Map<String, String> urlParams = getURLParams(httpExchange.getRequestURI());
   httpExchange.getResponseHeaders().set("Content-Type", "text/plain");
   boolean doExit = false;
   String response = "Invalid shutdown key\n";
   if (urlParams.containsKey("key") && urlParams.get("key").equals(shutdownKey)) {
     response = "Shutdown successful!\n";
     doExit = true;
   }
   httpExchange.sendResponseHeaders(HTTP_OK, response.getBytes().length);
   httpExchange.getResponseBody().write(response.getBytes());
   httpExchange.close();
   if (doExit) {
     System.exit(0);
   }
 }
Example #9
 private Map<String, String> buildMapping() {
   Map<String, String> extensionMapping = new LinkedHashMap<String, String>();
   // force inclusion of the unknown item to manage unknown files
   extensionMapping.put(
       DocumentType.UNKNOWN.getExtension(), DocumentType.UNKNOWN.getDefaultHeaderTypeName());
   for (Map.Entry<String, String> entry : mapping.entrySet()) {
     extensionMapping.put(entry.getKey().toLowerCase(), entry.getValue().toLowerCase());
   }
   if (useDefaultMapping) {
     for (Map.Entry<String, String> entry : defaultMapping().entrySet()) {
       if (!extensionMapping.containsKey(entry.getKey())) {
         extensionMapping.put(entry.getKey(), entry.getValue());
       }
     }
   }
   return extensionMapping;
 }
Example #10
 private void exec(String name, Search search) {
   VM vm = search.getVM();
   PCChoiceGenerator choiceGenerator = vm.getLastChoiceGeneratorOfType(PCChoiceGenerator.class);
   if (choiceGenerator != null) {
     PathCondition pc = choiceGenerator.getCurrentPC();
      // bail out if the search found no errors; getLastError() would return null below
      if (search.getErrors().isEmpty()) {
       return;
     }
     Property property = search.getLastError().getProperty();
     if (!property.getErrorMessage().contains(AssertionError.class.getCanonicalName())) {
       pc.header = pc.header.not();
     }
     /*
      * if (property instanceof NoUncaughtExceptionsProperty) {
      * NoUncaughtExceptionsProperty noUncaughtExceptionsProperty =
      * (NoUncaughtExceptionsProperty) property; String clName =
      * noUncaughtExceptionsProperty
      * .getUncaughtExceptionInfo().getCauseClassname();
      * if(!clName.equals(AssertionError.class.getCanonicalName())) {
      *
      * } System.out.println(clName); }
      */
     //
     /*
      * if (instruction instanceof IfInstruction) { if
      * (((IfInstruction) instruction).getConditionValue()) {
      * pc.solve();
      *
      * } }
      */
     pc.solve();
     Map<String, Object> varsVals = new HashMap<String, Object>();
     pc.header.getVarVals(varsVals);
     if (varsVals.containsKey("guess_fix")) {
       this.result = varsVals.get("guess_fix");
       if (processor.getType().equals(Boolean.class)) {
         this.result = this.result.equals(1);
       }
     }
     logger.debug("JPF Result " + this.result);
   }
 }
Example #11
  /**
   * Make sure that the deployment unit is loaded.
   *
   * @param duName deployment unit name
   * @return <code>true</code> if the deployment unit is loaded (either already or as a result of
   *     this call), <code>false</code> otherwise
   */
  protected boolean load(final String duName) {
    _rw.writeLock().lock();
    try {
      if (_deploymentUnits.containsKey(duName)) return true;
    } finally {
      _rw.writeLock().unlock();
    }

    try {
      return exec(
          new Callable<Boolean>() {
            public Boolean call(ConfStoreConnection conn) {
              DeploymentUnitDAO dudao = conn.getDeploymentUnit(duName);
              if (dudao == null) return false;
              load(dudao);
              return true;
            }
          });
    } catch (Exception ex) {
      __log.error("Error loading deployment unit: " + duName);
      return false;
    }
  }
Example #12
 /** Returns the {@link SegmentCacheIndex} for a given {@link RolapStar}. */
 public SegmentCacheIndex getIndex(RolapStar star) {
   if (!indexes.containsKey(star)) {
     indexes.put(star, new SegmentCacheIndexImpl(thread));
   }
   return indexes.get(star);
 }
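The containsKey/put/get sequence above is not atomic, so two threads calling getIndex concurrently could each create a SegmentCacheIndexImpl. A minimal sketch of an atomic variant, assuming indexes is (or can be changed to) a ConcurrentHashMap and Java 8+ is available:

 public SegmentCacheIndex getIndex(RolapStar star) {
   // computeIfAbsent checks and inserts in one atomic step, so at most one
   // SegmentCacheIndexImpl is ever created per star.
   return indexes.computeIfAbsent(star, s -> new SegmentCacheIndexImpl(thread));
 }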
Example #13
 @Override
 public long getLimit(@Nonnull String counterName) {
   return limits.containsKey(counterName) ? limits.get(counterName) : -1;
 }
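The lookup above queries the map twice (containsKey, then get). Under the assumption that limits is a Map&lt;String, Long&gt; on Java 8+, the same behavior fits in a single call:

 @Override
 public long getLimit(@Nonnull String counterName) {
   // getOrDefault returns -1 when the counter has no configured limit.
   return limits.getOrDefault(counterName, -1L);
 }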
Example #14
  /**
   * Opens new WebRTC data channel using specified parameters.
   *
   * @param type channel type as defined in control protocol description. Use 0 for "reliable".
   * @param prio channel priority. The higher the number, the lower the priority.
   * @param reliab Reliability Parameter<br>
   *     This field is ignored if a reliable channel is used. If a partial reliable channel with
   *     limited number of retransmissions is used, this field specifies the number of
   *     retransmissions. If a partial reliable channel with limited lifetime is used, this field
   *     specifies the maximum lifetime in milliseconds. The following table summarizes this:
   *     <pre>
   *     +------------------------------------------------+------------------+
   *     | Channel Type                                   |   Reliability    |
   *     |                                                |    Parameter     |
   *     +------------------------------------------------+------------------+
   *     | DATA_CHANNEL_RELIABLE                          |     Ignored      |
   *     | DATA_CHANNEL_RELIABLE_UNORDERED                |     Ignored      |
   *     | DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT           |  Number of RTX   |
   *     | DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT_UNORDERED |  Number of RTX   |
   *     | DATA_CHANNEL_PARTIAL_RELIABLE_TIMED            |  Lifetime in ms  |
   *     | DATA_CHANNEL_PARTIAL_RELIABLE_TIMED_UNORDERED  |  Lifetime in ms  |
   *     +------------------------------------------------+------------------+
   *     </pre>
   * @param sid SCTP stream id that will be used by new channel (it must not be already used).
   * @param label text label for the channel.
   * @return new instance of <tt>WebRtcDataStream</tt> that represents opened WebRTC data channel.
   * @throws IOException if IO error occurs.
   */
  public synchronized WebRtcDataStream openChannel(
      int type, int prio, long reliab, int sid, String label) throws IOException {
    if (channels.containsKey(sid)) {
      throw new IOException("Channel on sid: " + sid + " already exists");
    }

    // Label Length & Label
    byte[] labelBytes;
    int labelByteLength;

    if (label == null) {
      labelBytes = null;
      labelByteLength = 0;
    } else {
      labelBytes = label.getBytes("UTF-8");
      labelByteLength = labelBytes.length;
      if (labelByteLength > 0xFFFF) labelByteLength = 0xFFFF;
    }

    // Protocol Length & Protocol
    String protocol = WEBRTC_DATA_CHANNEL_PROTOCOL;
    byte[] protocolBytes;
    int protocolByteLength;

    if (protocol == null) {
      protocolBytes = null;
      protocolByteLength = 0;
    } else {
      protocolBytes = protocol.getBytes("UTF-8");
      protocolByteLength = protocolBytes.length;
      if (protocolByteLength > 0xFFFF) protocolByteLength = 0xFFFF;
    }

    ByteBuffer packet = ByteBuffer.allocate(12 + labelByteLength + protocolByteLength);

    // Message open new channel on current sid
    // Message Type
    packet.put((byte) MSG_OPEN_CHANNEL);
    // Channel Type
    packet.put((byte) type);
    // Priority
    packet.putShort((short) prio);
    // Reliability Parameter
    packet.putInt((int) reliab);
    // Label Length
    packet.putShort((short) labelByteLength);
    // Protocol Length
    packet.putShort((short) protocolByteLength);
    // Label
    if (labelByteLength != 0) {
      packet.put(labelBytes, 0, labelByteLength);
    }
    // Protocol
    if (protocolByteLength != 0) {
      packet.put(protocolBytes, 0, protocolByteLength);
    }

    int sentCount = sctpSocket.send(packet.array(), true, sid, WEB_RTC_PPID_CTRL);

    if (sentCount != packet.capacity()) {
      throw new IOException("Failed to open new chanel on sid: " + sid);
    }

    WebRtcDataStream channel = new WebRtcDataStream(sctpSocket, sid, label, false);

    channels.put(sid, channel);

    return channel;
  }
Example #15
 /** {@inheritDoc} */
 @Override
 public boolean isCacheTopologyValid(GridCacheContext cctx) {
   return cctx.config().getTopologyValidator() != null && cacheValidRes.containsKey(cctx.cacheId())
       ? cacheValidRes.get(cctx.cacheId())
       : true;
 }
Example #16
 private void handleNewJoin(InetAddress ep, EndpointState epState) {
   if (justRemovedEndpoints_.containsKey(ep)) return;
   logger_.info("Node {} is now part of the cluster", ep);
   handleMajorStateChange(ep, epState, false);
 }
Example #17
  /**
   * Handles control packet.
   *
   * @param data raw packet data that arrived on control PPID.
   * @param sid SCTP stream id on which the data has arrived.
   */
  private synchronized void onCtrlPacket(byte[] data, int sid) throws IOException {
    ByteBuffer buffer = ByteBuffer.wrap(data);
    int messageType = /* 1 byte unsigned integer */ 0xFF & buffer.get();

    if (messageType == MSG_CHANNEL_ACK) {
      if (logger.isDebugEnabled()) {
        logger.debug(getEndpoint().getID() + " ACK received SID: " + sid);
      }
      // Open channel ACK
      WebRtcDataStream channel = channels.get(sid);
      if (channel != null) {
        // The ack check prevents firing multiple notifications
        // if we get more than one ACK (by mistake/bug).
        if (!channel.isAcknowledged()) {
          channel.ackReceived();
          notifyChannelOpened(channel);
        } else {
          logger.warn("Redundant ACK received for SID: " + sid);
        }
      } else {
        logger.error("No channel exists on sid: " + sid);
      }
    } else if (messageType == MSG_OPEN_CHANNEL) {
      int channelType = /* 1 byte unsigned integer */ 0xFF & buffer.get();
      int priority = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
      long reliability = /* 4 bytes unsigned integer */ 0xFFFFFFFFL & buffer.getInt();
      int labelLength = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
      int protocolLength = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
      String label;
      String protocol;

      if (labelLength == 0) {
        label = "";
      } else {
        byte[] labelBytes = new byte[labelLength];

        buffer.get(labelBytes);
        label = new String(labelBytes, "UTF-8");
      }
      if (protocolLength == 0) {
        protocol = "";
      } else {
        byte[] protocolBytes = new byte[protocolLength];

        buffer.get(protocolBytes);
        protocol = new String(protocolBytes, "UTF-8");
      }

      if (logger.isDebugEnabled()) {
        logger.debug(
            "!!! "
                + getEndpoint().getID()
                + " data channel open request on SID: "
                + sid
                + " type: "
                + channelType
                + " prio: "
                + priority
                + " reliab: "
                + reliability
                + " label: "
                + label
                + " proto: "
                + protocol);
      }

      if (channels.containsKey(sid)) {
        logger.error("Channel on sid: " + sid + " already exists");
      }

      WebRtcDataStream newChannel = new WebRtcDataStream(sctpSocket, sid, label, true);
      channels.put(sid, newChannel);

      sendOpenChannelAck(sid);

      notifyChannelOpened(newChannel);
    } else {
      logger.error("Unexpected ctrl msg type: " + messageType);
    }
  }
Example #18
    /**
     * Inspect an <tt>RTCPCompoundPacket</tt> and build-up the state for future estimations.
     *
     * @param pkt
     */
    public void apply(RTCPCompoundPacket pkt) {
      if (pkt == null || pkt.packets == null || pkt.packets.length == 0) {
        return;
      }

      for (RTCPPacket rtcpPacket : pkt.packets) {
        switch (rtcpPacket.type) {
          case RTCPPacket.SR:
            RTCPSRPacket srPacket = (RTCPSRPacket) rtcpPacket;

            // The media sender SSRC.
            int ssrc = srPacket.ssrc;

            // Convert 64-bit NTP timestamp to Java standard time.
            // Note that java time (milliseconds) by definition has
            // less precision than NTP time (picoseconds) so
            // converting NTP timestamp to java time and back to NTP
            // timestamp loses precision. For example, Tue, Dec 17
            // 2002 09:07:24.810 EST is represented by a single
            // Java-based time value of f22cd1fc8a, but its NTP
            // equivalent are all values ranging from
            // c1a9ae1c.cf5c28f5 to c1a9ae1c.cf9db22c.

            // Round off the fractional part to limit the precision
            // lost when converting to the lower-precision representation
            long fraction = Math.round(1000D * srPacket.ntptimestamplsw / 0x100000000L);
            /*
             * If the most significant bit (MSB) on the seconds
             * field is set we use a different time base. The
             * following text is a quote from RFC-2030 (SNTP v4):
             *
             * If bit 0 is set, the UTC time is in the range
             * 1968-2036 and UTC time is reckoned from 0h 0m 0s UTC
             * on 1 January 1900. If bit 0 is not set, the time is
             * in the range 2036-2104 and UTC time is reckoned from
             * 6h 28m 16s UTC on 7 February 2036.
             */
            long msb = srPacket.ntptimestampmsw & 0x80000000L;
            long remoteTime =
                (msb == 0)
                    // use base: 7-Feb-2036 @ 06:28:16 UTC
                    ? msb0baseTime + (srPacket.ntptimestampmsw * 1000) + fraction
                    // use base: 1-Jan-1900 @ 01:00:00 UTC
                    : msb1baseTime + (srPacket.ntptimestampmsw * 1000) + fraction;

            // Estimate the clock rate of the sender.
            int frequencyHz = -1;
            if (receivedClocks.containsKey(ssrc)) {
              // Calculate the clock rate.
              ReceivedRemoteClock oldStats = receivedClocks.get(ssrc);
              RemoteClock oldRemoteClock = oldStats.getRemoteClock();
              frequencyHz =
                  Math.round(
                      (float)
                              (((int) srPacket.rtptimestamp - oldRemoteClock.getRtpTimestamp())
                                  & 0xffffffffl)
                          / (remoteTime - oldRemoteClock.getRemoteTime()));
            }

            // Replace whatever was in there before.
            receivedClocks.put(
                ssrc,
                new ReceivedRemoteClock(
                    ssrc, remoteTime, (int) srPacket.rtptimestamp, frequencyHz));
            break;
          case RTCPPacket.SDES:
            break;
        }
      }
    }
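The NTP comments above compress a fair bit of arithmetic. A self-contained sketch of the same conversion for timestamps reckoned from 1 January 1900 (the MSB-set case, covering 1968-2036); the class and method names are illustrative only, and the post-2036 base used in the snippet is omitted for brevity:

public final class NtpToUnix {
  // Seconds between the NTP epoch (1 Jan 1900) and the Unix epoch (1 Jan 1970).
  private static final long SECONDS_1900_TO_1970 = 2208988800L;

  // Convert a 64-bit NTP timestamp (32-bit seconds + 32-bit fraction) to Unix epoch millis.
  static long ntpToUnixMillis(long ntpSeconds, long ntpFraction) {
    long millis = (ntpSeconds - SECONDS_1900_TO_1970) * 1000L;
    // 2^32 fractional units per second, rounded the same way as in apply() above.
    millis += Math.round(1000D * ntpFraction / 0x100000000L);
    return millis;
  }

  public static void main(String[] args) {
    // c1a9ae1c.cf5c28f5 from the comment above: Tue, 17 Dec 2002 14:07:24.810 UTC.
    System.out.println(ntpToUnixMillis(0xc1a9ae1cL, 0xcf5c28f5L));
  }
}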
Example #19
 public boolean isKnownEndpoint(InetAddress endpoint) {
   return endpointStateMap_.containsKey(endpoint);
 }
Example #20
  /** Deploys a process. */
  public Collection<QName> deploy(
      final File deploymentUnitDirectory,
      boolean activate,
      String duName,
      boolean autoincrementVersion) {
    __log.info(__msgs.msgDeployStarting(deploymentUnitDirectory));

    final Date deployDate = new Date();

    // Create the DU and compile/scan it before acquiring lock.
    final DeploymentUnitDir du = new DeploymentUnitDir(deploymentUnitDirectory);
    if (duName != null) {
      // Override the package name if given from the parameter
      du.setName(duName);
    }

    long version;
    if (autoincrementVersion || du.getStaticVersion() == -1) {
      // Process and DU use a monotonically increasing single version number by default.
      try {
        version = getCurrentVersion();
      } finally {
        // we need to reset the current version thread local value.
        _currentVersion.set(null);
      }
    } else {
      version = du.getStaticVersion();
    }
    du.setVersion(version);

    try {
      du.compile();
    } catch (CompilationException ce) {
      String errmsg = __msgs.msgDeployFailCompileErrors(ce);
      __log.error(errmsg, ce);
      throw new ContextException(errmsg, ce);
    }

    du.scan();
    final DeployDocument dd = du.getDeploymentDescriptor();
    final ArrayList<ProcessConfImpl> processes = new ArrayList<ProcessConfImpl>();
    Collection<QName> deployed;

    _rw.writeLock().lock();

    try {
      if (_deploymentUnits.containsKey(du.getName())) {
        String errmsg = __msgs.msgDeployFailDuplicateDU(du.getName());
        __log.error(errmsg);
        throw new ContextException(errmsg);
      }

      retirePreviousPackageVersions(du);

      for (TDeployment.Process processDD : dd.getDeploy().getProcessArray()) {
        QName pid = toPid(processDD.getName(), version);

        if (_processes.containsKey(pid)) {
          String errmsg = __msgs.msgDeployFailDuplicatePID(processDD.getName(), du.getName());
          __log.error(errmsg);
          throw new ContextException(errmsg);
        }

        QName type = processDD.getType() != null ? processDD.getType() : processDD.getName();

        CBPInfo cbpInfo = du.getCBPInfo(type);
        if (cbpInfo == null) {
          String errmsg = __msgs.msgDeployFailedProcessNotFound(processDD.getName(), du.getName());
          __log.error(errmsg);
          throw new ContextException(errmsg);
        }

        ProcessConfImpl pconf =
            new ProcessConfImpl(
                pid,
                processDD.getName(),
                version,
                du,
                processDD,
                deployDate,
                calcInitialProperties(du.getProperties(), processDD),
                calcInitialState(processDD),
                eprContext,
                _configDir,
                generateProcessEventsAll);
        processes.add(pconf);
      }

      _deploymentUnits.put(du.getName(), du);

      for (ProcessConfImpl process : processes) {
        __log.info(__msgs.msgProcessDeployed(du.getDeployDir(), process.getProcessId()));
        _processes.put(process.getProcessId(), process);
      }

    } finally {
      _rw.writeLock().unlock();
    }

    // Do the deployment in the DB. We need this so that we remember deployments across system
    // shutdowns.
    // We don't fail if there is a DB error, simply print some errors.
    deployed =
        exec(
            new Callable<Collection<QName>>() {
              public Collection<QName> call(ConfStoreConnection conn) {
                // Check that this deployment unit is not deployed.
                DeploymentUnitDAO dudao = conn.getDeploymentUnit(du.getName());
                if (dudao != null) {
                  String errmsg = "Database out of synch for DU " + du.getName();
                  __log.warn(errmsg);
                  dudao.delete();
                }

                dudao = conn.createDeploymentUnit(du.getName());
                try {
                  dudao.setDeploymentUnitDir(deploymentUnitDirectory.getCanonicalPath());
                } catch (IOException e1) {
                  String errmsg =
                      "Error getting canonical path for "
                          + du.getName()
                          + "; deployment unit will not be available after restart!";
                  __log.error(errmsg);
                }

                ArrayList<QName> deployed = new ArrayList<QName>();
                // Going through each process declared in the dd
                for (ProcessConfImpl pc : processes) {
                  try {
                    ProcessConfDAO newDao =
                        dudao.createProcess(pc.getProcessId(), pc.getType(), pc.getVersion());
                    newDao.setState(pc.getState());
                    for (Map.Entry<QName, Node> prop : pc.getProcessProperties().entrySet()) {
                      newDao.setProperty(prop.getKey(), DOMUtils.domToString(prop.getValue()));
                    }
                    deployed.add(pc.getProcessId());
                  } catch (Throwable e) {
                    String errmsg =
                        "Error persisting deployment record for "
                            + pc.getProcessId()
                            + "; process will not be available after restart!";
                    __log.error(errmsg, e);
                  }
                }
                return deployed;
              }
            });

    _rw.readLock().lock();
    boolean readLockHeld = true;
    try {
      for (ProcessConfImpl process : processes) {
        fireEvent(
            new ProcessStoreEvent(
                ProcessStoreEvent.Type.DEPLOYED,
                process.getProcessId(),
                process.getDeploymentUnit().getName()));
        fireStateChange(
            process.getProcessId(), process.getState(), process.getDeploymentUnit().getName());
      }
    } catch (Exception e) {
      // need to unlock as undeploy operation will need a writeLock
      _rw.readLock().unlock();
      readLockHeld = false;

      // A problem at this point means that engine deployment failed; we don't want the store to
      // keep the du
      __log.warn("Deployment failed within the engine, store undeploying process.", e);
      undeploy(deploymentUnitDirectory);
      if (e instanceof ContextException) throw (ContextException) e;
      else throw new ContextException("Deployment failed within the engine. " + e.getMessage(), e);
    } finally {
      if (readLockHeld) _rw.readLock().unlock();
    }

    return deployed;
  }
Example #21
    @Override
    public void handle(HttpExchange httpExchange) throws IOException {
      // Set common response headers
      httpExchange.getResponseHeaders().add("Access-Control-Allow-Origin", "*");

      Future<String> json =
          corenlpExecutor.submit(
              () -> {
                try {
                  // Get the document
                  Properties props =
                      new Properties() {
                        {
                          setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,depparse");
                        }
                      };
                  Annotation doc = getDocument(props, httpExchange);
                  if (!doc.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
                    StanfordCoreNLP pipeline = mkStanfordCoreNLP(props);
                    pipeline.annotate(doc);
                  }

                  // Construct the matcher
                  Map<String, String> params = getURLParams(httpExchange.getRequestURI());
                  // (get the pattern)
                  if (!params.containsKey("pattern")) {
                    respondError("Missing required parameter 'pattern'", httpExchange);
                    return "";
                  }
                  String pattern = params.get("pattern");
                  // (get whether to filter / find)
                  String filterStr = params.getOrDefault("filter", "false");
                  final boolean filter =
                      filterStr.trim().isEmpty()
                          || "true".equalsIgnoreCase(filterStr.toLowerCase());
                  // (create the matcher)
                  final SemgrexPattern regex = SemgrexPattern.compile(pattern);

                  // Run TokensRegex
                  return JSONOutputter.JSONWriter.objectToJSON(
                      (docWriter) -> {
                        if (filter) {
                          // Case: just filter sentences
                          docWriter.set(
                              "sentences",
                              doc.get(CoreAnnotations.SentencesAnnotation.class)
                                  .stream()
                                  .map(
                                      sentence ->
                                          regex
                                              .matcher(
                                                  sentence.get(
                                                      SemanticGraphCoreAnnotations
                                                          .CollapsedCCProcessedDependenciesAnnotation
                                                          .class))
                                              .matches())
                                  .collect(Collectors.toList()));
                        } else {
                          // Case: find matches
                          docWriter.set(
                              "sentences",
                              doc.get(CoreAnnotations.SentencesAnnotation.class)
                                  .stream()
                                  .map(
                                      sentence ->
                                          (Consumer<JSONOutputter.Writer>)
                                              (JSONOutputter.Writer sentWriter) -> {
                                                SemgrexMatcher matcher =
                                                    regex.matcher(
                                                        sentence.get(
                                                            SemanticGraphCoreAnnotations
                                                                .CollapsedCCProcessedDependenciesAnnotation
                                                                .class));
                                                int i = 0;
                                                while (matcher.find()) {
                                                  sentWriter.set(
                                                      Integer.toString(i),
                                                      (Consumer<JSONOutputter.Writer>)
                                                          (JSONOutputter.Writer matchWriter) -> {
                                                            IndexedWord match = matcher.getMatch();
                                                            matchWriter.set("text", match.word());
                                                            matchWriter.set(
                                                                "begin", match.index() - 1);
                                                            matchWriter.set("end", match.index());
                                                            for (String capture :
                                                                matcher.getNodeNames()) {
                                                              matchWriter.set(
                                                                  "$" + capture,
                                                                  (Consumer<JSONOutputter.Writer>)
                                                                      groupWriter -> {
                                                                        IndexedWord node =
                                                                            matcher.getNode(
                                                                                capture);
                                                                        groupWriter.set(
                                                                            "text", node.word());
                                                                        groupWriter.set(
                                                                            "begin",
                                                                            node.index() - 1);
                                                                        groupWriter.set(
                                                                            "end", node.index());
                                                                      });
                                                            }
                                                          });
                                                  i += 1;
                                                }
                                                sentWriter.set("length", i);
                                              }));
                        }
                      });
                } catch (Exception e) {
                  e.printStackTrace();
                  try {
                    respondError(e.getClass().getName() + ": " + e.getMessage(), httpExchange);
                  } catch (IOException ignored) {
                  }
                }
                return "";
              });

      // Send response
      byte[] response = new byte[0];
      try {
        response = json.get(5, TimeUnit.SECONDS).getBytes();
      } catch (InterruptedException | ExecutionException | TimeoutException e) {
        respondError("Timeout when executing Semgrex query", httpExchange);
      }
      if (response.length > 0) {
        httpExchange.getResponseHeaders().add("Content-Type", "text/json");
        httpExchange.getResponseHeaders().add("Content-Length", Integer.toString(response.length));
        httpExchange.sendResponseHeaders(HTTP_OK, response.length);
        httpExchange.getResponseBody().write(response);
        httpExchange.close();
      }
    }
Example #22
  public BrokerHost addChannel(Channel channel) {
    lock.lock();
    try {
      // add the channel and return the broker host
      Manageable manageable;
      if (channel.getDirection() == Direction.OUT) {
        BrokerHost host = brokerHosts.get(brokerIndex);
        List<Channel> producerChannels = brokerHostToProducerChannelMap.get(host);
        BlockingQueue<MessageContext> channelOutQueue = producerQueues.get(host);

        if (!producers.containsKey(host)) {
          manageable =
              transport.registerProducer(
                  host, prefix, channel.getProperties(), producerQueues.get(host));
          producers.put(host, manageable);

          manageable.start();
        }

        // now register the channel with the brokers map
        // check whether you have a sender consumer for this host
        channel.setOutQueue(channelOutQueue);
        producerChannels.add(channel);
        producerChannels.add(new Channel("", Direction.OUT));

        LOG.info(
            "Registering channel {} with group {} and host {}",
            channel.getName(),
            name,
            host.toString());
        incrementProducerIndex();

        return host;

      } else if (channel.getDirection() == Direction.IN) {
        BrokerHost host = brokerHosts.get(brokerIndex);
        List<Channel> consumerChannels = brokerHostToConsumerChannelMap.get(host);
        if (!consumers.containsKey(host)) {
          BlockingQueue<MessageContext> channelInQueue = consumerQueues.get(host);
          manageable =
              transport.registerConsumer(host, prefix, channel.getProperties(), channelInQueue);
          consumers.put(host, manageable);

          ConsumingWorker worker;
          if (channel.isGrouped()) {
            worker = new ConsumingWorker(consumerChannels, channelInQueue);
          } else {
            worker = new ConsumingWorker(consumerChannels, channelInQueue, true);
          }
          Thread thread = new Thread(worker);
          thread.start();

          manageable.start();

          consumingWorkers.put(host, worker);
        }

        // now register the channel with the brokers map
        // check whether you have a sender consumer for this host
        consumerChannels.add(channel);

        LOG.info(
            "Registering channel {} with group {} and host {}",
            channel.getName(),
            name,
            host.toString());
        incrementConsumerIndex();

        return host;
      }
    } finally {
      lock.unlock();
    }
    return null;
  }