Code example #1
    public void run() {
      super.run();

      // save the last set of active JVMs
      Set lastActiveVms = activeVms;

      try {
        // get the current set of active JVMs
        activeVms = (HashSet) vmManager.activeVms();

      } catch (MonitorException e) {
        // XXX: use logging api
        System.err.println("MonitoredHostProvider: polling task " + "caught MonitorException:");
        e.printStackTrace();

        // mark the HostManager as errored and notify listeners
        setLastException(e);
        fireDisconnectedEvents();
      }

      if (activeVms.isEmpty()) {
        return;
      }

      Set startedVms = new HashSet();
      Set terminatedVms = new HashSet();

      for (Iterator i = activeVms.iterator(); i.hasNext(); /* empty */ ) {
        Integer vmid = (Integer) i.next();
        if (!lastActiveVms.contains(vmid)) {
          // a new JVM has been detected, add it to the started set
          startedVms.add(vmid);
        }
      }

      for (Iterator i = lastActiveVms.iterator(); i.hasNext(); /* empty */ ) {
        Object o = i.next();
        if (!activeVms.contains(o)) {
          // JVM has terminated, remove it from the active list
          terminatedVms.add(o);
        }
      }

      if (!startedVms.isEmpty() || !terminatedVms.isEmpty()) {
        fireVmStatusChangedEvents(activeVms, startedVms, terminatedVms);
      }
    }
Code example #2
File: RedisBungee.java  Project: Civcraft/RedisBungee
 final Set<UUID> getPlayers() {
   ImmutableSet.Builder<UUID> setBuilder = ImmutableSet.builder();
   if (pool != null) {
     try (Jedis rsc = pool.getResource()) {
       List<String> keys = new ArrayList<>();
       for (String i : getServerIds()) {
         keys.add("proxy:" + i + ":usersOnline");
       }
       if (!keys.isEmpty()) {
         Set<String> users = rsc.sunion(keys.toArray(new String[keys.size()]));
         if (users != null && !users.isEmpty()) {
           for (String user : users) {
             try {
               setBuilder = setBuilder.add(UUID.fromString(user));
             } catch (IllegalArgumentException ignored) {
             }
           }
         }
       }
     } catch (JedisConnectionException e) {
       // Redis server has disappeared!
       getLogger()
           .log(
               Level.SEVERE,
               "Unable to get connection from pool - did your Redis server go away?",
               e);
       throw new RuntimeException("Unable to get all players online", e);
     }
   }
   return setBuilder.build();
 }
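
The example above computes the set of online players by taking the SUNION of every proxy's "usersOnline" set in Redis in a single round trip. Below is a minimal sketch of that idiom with the Jedis client; the host, port, and key names are illustrative and not taken from RedisBungee.

import java.util.Set;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

public class SunionSketch {
  public static void main(String[] args) {
    // Hypothetical pool; RedisBungee builds its pool from plugin configuration.
    try (JedisPool pool = new JedisPool("localhost", 6379);
         Jedis jedis = pool.getResource()) {
      // Union the per-proxy online-user sets server-side with one command.
      Set<String> online = jedis.sunion("proxy:proxy-1:usersOnline", "proxy:proxy-2:usersOnline");
      System.out.println("players online: " + online.size());
    }
  }
}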
Code example #3
File: Trees.java  Project: hercky/undergradCourses
  /**
   * Return information about the objects in this Tree.
   *
   * @param t The tree to examine.
   * @return A human-readable String
   */
  public static String toDebugStructureString(Tree t) {
    StringBuilder sb = new StringBuilder();
    String tCl = StringUtils.getShortClassName(t);
    String tfCl = StringUtils.getShortClassName(t.treeFactory());
    String lCl = StringUtils.getShortClassName(t.label());
    String lfCl = StringUtils.getShortClassName(t.label().labelFactory());
    Set<String> otherClasses = new HashSet<String>();
    for (Tree st : t) {
      String stCl = StringUtils.getShortClassName(st);
      String stfCl = StringUtils.getShortClassName(st.treeFactory());
      String slCl = StringUtils.getShortClassName(st.label());
      String slfCl = StringUtils.getShortClassName(st.label().labelFactory());

      if (!tCl.equals(stCl)) {
        otherClasses.add(stCl);
      }
      if (!tfCl.equals(stfCl)) {
        otherClasses.add(stfCl);
      }
      if (!lCl.equals(slCl)) {
        otherClasses.add(slCl);
      }
      if (!lfCl.equals(slfCl)) {
        otherClasses.add(slfCl);
      }
    }
    sb.append("Tree with root of class ").append(tCl).append(" and factory ").append(tfCl);
    sb.append(" with label class ").append(lCl).append(" and factory ").append(lfCl);
    if (!otherClasses.isEmpty()) {
      sb.append(" with the following classes also found within the tree: ").append(otherClasses);
    }
    return sb.toString();
  }
Code example #4
  /**
   * Notifies this <tt>Conference</tt> that the ordered list of <tt>Endpoint</tt>s of {@link
   * #speechActivity} i.e. the dominant speaker history has changed.
   *
   * <p>This instance notifies the video <tt>Channel</tt>s about the change so that they may update
   * their last-n lists and report to this instance which <tt>Endpoint</tt>s are to be asked for
   * video keyframes.
   */
  private void speechActivityEndpointsChanged() {
    List<Endpoint> endpoints = null;

    for (Content content : getContents()) {
      if (MediaType.VIDEO.equals(content.getMediaType())) {
        Set<Endpoint> endpointsToAskForKeyframes = null;

        endpoints = speechActivity.getEndpoints();
        for (Channel channel : content.getChannels()) {
          if (!(channel instanceof RtpChannel)) continue;

          RtpChannel rtpChannel = (RtpChannel) channel;
          List<Endpoint> channelEndpointsToAskForKeyframes =
              rtpChannel.speechActivityEndpointsChanged(endpoints);

          if ((channelEndpointsToAskForKeyframes != null)
              && !channelEndpointsToAskForKeyframes.isEmpty()) {
            if (endpointsToAskForKeyframes == null) {
              endpointsToAskForKeyframes = new HashSet<>();
            }
            endpointsToAskForKeyframes.addAll(channelEndpointsToAskForKeyframes);
          }
        }

        if ((endpointsToAskForKeyframes != null) && !endpointsToAskForKeyframes.isEmpty()) {
          content.askForKeyframes(endpointsToAskForKeyframes);
        }
      }
    }
  }
Code example #5
  /**
   * Tests that resetting the target platform should work OK (i.e. is equivalent to the models in
   * the default target platform).
   *
   * @throws CoreException
   */
  public void testResetTargetPlatform() throws Exception {
    ITargetDefinition definition = getDefaultTargetPlatorm();
    Set urls = getAllBundleURLs(definition);
    Set fragments = new HashSet();
    TargetBundle[] bundles = definition.getBundles();
    for (int i = 0; i < bundles.length; i++) {
      if (bundles[i].isFragment()) {
        fragments.add(new File(bundles[i].getBundleInfo().getLocation()).toURL());
      }
    }

    // current platform
    IPluginModelBase[] models = TargetPlatformHelper.getPDEState().getTargetModels();

    // should be equivalent
    assertEquals("Should have same number of bundles", urls.size(), models.length);
    for (int i = 0; i < models.length; i++) {
      String location = models[i].getInstallLocation();
      URL url = new File(location).toURL();
      assertTrue("Missing plug-in " + location, urls.contains(url));
      if (models[i].isFragmentModel()) {
        assertTrue("Missing fragmnet", fragments.remove(url));
      }
    }
    assertTrue("Different number of fragments", fragments.isEmpty());
  }
Code example #6
 public static IMarker[] getProblemsFor(IResource resource) {
   try {
     if (resource != null && resource.exists()) {
       IMarker[] markers =
           resource.findMarkers(
               IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER, false, IResource.DEPTH_INFINITE);
       Set markerTypes =
           JavaModelManager.getJavaModelManager().compilationParticipants.managedMarkerTypes();
       if (markerTypes.isEmpty()) return markers;
       ArrayList markerList = new ArrayList(5);
       for (int i = 0, length = markers.length; i < length; i++) {
         markerList.add(markers[i]);
       }
       Iterator iterator = markerTypes.iterator();
       while (iterator.hasNext()) {
         markers = resource.findMarkers((String) iterator.next(), false, IResource.DEPTH_INFINITE);
         for (int i = 0, length = markers.length; i < length; i++) {
           markerList.add(markers[i]);
         }
       }
       IMarker[] result;
       markerList.toArray(result = new IMarker[markerList.size()]);
       return result;
     }
   } catch (CoreException e) {
     // assume there are no problems
   }
   return new IMarker[0];
 }
Code example #7
  private void updateCursorHighlighting(boolean scroll) {
    hideBalloon();

    if (myCursorHighlighter != null) {
      HighlightManager.getInstance(mySearchResults.getProject())
          .removeSegmentHighlighter(mySearchResults.getEditor(), myCursorHighlighter);
      myCursorHighlighter = null;
    }

    final FindResult cursor = mySearchResults.getCursor();
    Editor editor = mySearchResults.getEditor();
    SelectionModel selection = editor.getSelectionModel();
    if (cursor != null) {
      Set<RangeHighlighter> dummy = new HashSet<RangeHighlighter>();
      highlightRange(
          cursor, new TextAttributes(null, null, Color.BLACK, EffectType.ROUNDED_BOX, 0), dummy);
      if (!dummy.isEmpty()) {
        myCursorHighlighter = dummy.iterator().next();
      }

      if (scroll) {
        if (mySearchResults.getFindModel().isGlobal()) {
          FoldingModel foldingModel = editor.getFoldingModel();
          final FoldRegion[] allRegions = editor.getFoldingModel().getAllFoldRegions();

          foldingModel.runBatchFoldingOperation(
              new Runnable() {
                @Override
                public void run() {
                  for (FoldRegion region : allRegions) {
                    if (!region.isValid()) continue;
                    if (cursor.intersects(TextRange.create(region))) {
                      region.setExpanded(true);
                    }
                  }
                }
              });
          selection.setSelection(cursor.getStartOffset(), cursor.getEndOffset());

          editor.getCaretModel().moveToOffset(cursor.getEndOffset());
          editor.getScrollingModel().scrollToCaret(ScrollType.CENTER);
        } else {
          if (!SearchResults.insideVisibleArea(editor, cursor)) {
            LogicalPosition pos = editor.offsetToLogicalPosition(cursor.getStartOffset());
            editor.getScrollingModel().scrollTo(pos, ScrollType.CENTER);
          }
        }
      }
      editor
          .getScrollingModel()
          .runActionOnScrollingFinished(
              new Runnable() {
                @Override
                public void run() {
                  showReplacementPreview();
                }
              });
    }
  }
Code example #8
  public boolean isProtectedMethod(String method) {
    boolean retval = false;

    if (protectedMethods.isEmpty() || protectedMethods.contains(method)) {
      retval = true;
    }

    return retval;
  }
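
Note the idiom in the example above (#8): an empty protectedMethods set means "protect everything", so the method returns true both when the set is empty and when it explicitly lists the method. A self-contained sketch of the same allow-all-when-empty pattern (class and field names are made up for illustration):

import java.util.HashSet;
import java.util.Set;

public class MethodGuard {
  private final Set<String> protectedMethods = new HashSet<>();

  // An empty set protects every method; otherwise only the listed ones are protected.
  public boolean isProtectedMethod(String method) {
    return protectedMethods.isEmpty() || protectedMethods.contains(method);
  }

  public static void main(String[] args) {
    MethodGuard guard = new MethodGuard();
    System.out.println(guard.isProtectedMethod("login"));    // true: empty set protects all
    guard.protectedMethods.add("transfer");
    System.out.println(guard.isProtectedMethod("login"));    // false: not in the explicit list
  }
}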
Code example #9
File: Grammar.java  Project: iguana-parser/iguana
    public Grammar build() {

      Set<RuntimeException> exceptions = validate(rules, definitions);

      if (!exceptions.isEmpty()) {
        throw new GrammarValidationException(exceptions);
      }

      return new Grammar(this);
    }
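
Example #9 shows a validate-then-aggregate pattern: collect every validation failure into a set and throw only if that set is non-empty, so the caller sees all problems at once instead of just the first. A minimal sketch of the same idiom under assumed names (this is not the iguana-parser API):

import java.util.LinkedHashSet;
import java.util.Set;

public class ConfigBuilder {
  private String name;
  private int port = -1;

  public ConfigBuilder name(String name) { this.name = name; return this; }
  public ConfigBuilder port(int port) { this.port = port; return this; }

  public String build() {
    // Collect every validation failure instead of failing fast on the first one.
    Set<RuntimeException> exceptions = new LinkedHashSet<>();
    if (name == null || name.isEmpty()) {
      exceptions.add(new IllegalStateException("name must be set"));
    }
    if (port < 0 || port > 65535) {
      exceptions.add(new IllegalStateException("port must be in [0, 65535]"));
    }
    if (!exceptions.isEmpty()) {
      // Mirror GrammarValidationException: report all problems in one exception.
      throw new IllegalStateException("invalid configuration: " + exceptions);
    }
    return name + ":" + port;
  }
}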
Code example #10
 /**
  * get all types scanned. this is effectively similar to getting all subtypes of Object.
  *
  * <p>depends on SubTypesScanner configured with {@code SubTypesScanner(false)}, otherwise {@code
  * ReflectionsException} is thrown
  *
  * <p><i>note using this might be a bad practice. it is better to get types matching some
  * criteria, such as {@link #getSubTypesOf(Class)} or {@link #getTypesAnnotatedWith(Class)}</i>
  *
  * @return Set of String, and not of Class, in order to avoid definition of all types in PermGen
  */
 public Set<String> getAllTypes() {
   Set<String> allTypes =
       Sets.newHashSet(store.getAll(index(SubTypesScanner.class), Object.class.getName()));
   if (allTypes.isEmpty()) {
     throw new ReflectionsException(
         "Couldn't find subtypes of Object. "
             + "Make sure SubTypesScanner initialized to include Object class - new SubTypesScanner(false)");
   }
   return allTypes;
 }
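
As the Javadoc in example #10 notes, getAllTypes() only works when the Reflections store was populated by a SubTypesScanner constructed with false (i.e. not excluding direct subtypes of Object). A hedged usage sketch with the org.reflections API; the package name "com.example" is a placeholder:

import java.util.Set;

import org.reflections.Reflections;
import org.reflections.scanners.SubTypesScanner;
import org.reflections.util.ClasspathHelper;
import org.reflections.util.ConfigurationBuilder;

public class AllTypesSketch {
  public static void main(String[] args) {
    Reflections reflections = new Reflections(
        new ConfigurationBuilder()
            .setUrls(ClasspathHelper.forPackage("com.example"))
            // false => keep direct subtypes of Object, as getAllTypes() requires
            .setScanners(new SubTypesScanner(false)));

    Set<String> allTypes = reflections.getAllTypes();
    System.out.println(allTypes.size() + " types scanned");
  }
}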
Code example #11
    // we don't care about the return value but care about it throwing exception
    public void runMayThrow() throws Exception {
      if (endpoints.isEmpty()) {
        differencingDone.signalAll();
        logger.info(
            "No neighbors to repair with for "
                + tablename
                + " on "
                + range
                + ": "
                + getName()
                + " completed.");
        return;
      }

      // Checking all nodes are live
      for (InetAddress endpoint : endpoints) {
        if (!FailureDetector.instance.isAlive(endpoint)) {
          differencingDone.signalAll();
          logger.info(
              "Could not proceed on repair because a neighbor ("
                  + endpoint
                  + ") is dead: "
                  + getName()
                  + " failed.");
          return;
        }
      }

      AntiEntropyService.instance.sessions.put(getName(), this);
      Gossiper.instance.register(this);
      FailureDetector.instance.registerFailureDetectionEventListener(this);
      try {
        // Create and queue a RepairJob for each column family
        for (String cfname : cfnames) {
          RepairJob job = new RepairJob(cfname);
          jobs.offer(job);
          activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background
        completed.await();
        if (exception != null) throw exception;
      } catch (InterruptedException e) {
        throw new RuntimeException(
            "Interrupted while waiting for repair: repair will continue in the background.");
      } finally {
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
      }
    }
Code example #12
  /**
   * Indicates whether to include the entry with the specified DN in the import.
   *
   * @param dn The DN of the entry for which to make the determination.
   * @return <CODE>true</CODE> if the entry with the specified DN should be included in the import,
   *     or <CODE>false</CODE> if not.
   */
  public boolean includeEntry(DN dn) {
    if (!excludeBranches.isEmpty()) {
      for (DN excludeBranch : excludeBranches) {
        if (excludeBranch.isAncestorOf(dn)) {
          return false;
        }
      }
    }

    if (!includeBranches.isEmpty()) {
      for (DN includeBranch : includeBranches) {
        if (includeBranch.isAncestorOf(dn)) {
          return true;
        }
      }

      return false;
    }

    return true;
  }
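
The precedence in example #12 is: an explicit exclude branch wins, then an explicit include branch acts as a whitelist, and with no include branches configured everything else is allowed. A self-contained sketch of the same precedence using plain string suffixes in place of DN.isAncestorOf (names here are illustrative only):

import java.util.List;

public class BranchFilter {
  private final List<String> excludeBranches;
  private final List<String> includeBranches;

  public BranchFilter(List<String> excludeBranches, List<String> includeBranches) {
    this.excludeBranches = excludeBranches;
    this.includeBranches = includeBranches;
  }

  // Exclusions take precedence; inclusions, if any, act as a whitelist; otherwise allow.
  public boolean includeEntry(String dn) {
    for (String exclude : excludeBranches) {
      if (dn.endsWith(exclude)) {   // stand-in for excludeBranch.isAncestorOf(dn)
        return false;
      }
    }
    if (!includeBranches.isEmpty()) {
      for (String include : includeBranches) {
        if (dn.endsWith(include)) { // stand-in for includeBranch.isAncestorOf(dn)
          return true;
        }
      }
      return false;
    }
    return true;
  }
}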
Code example #13
  private Set outputUrlResults(String url, Set m_inclset, Set m_exclset) {
    Set new_incls = new TreeSet(CollectionUtils.subtract(m_inclset, m_reported));
    Set new_excls = new TreeSet(CollectionUtils.subtract(m_exclset, m_reported));
    if (!m_inclset.isEmpty()) {
      outputMessage(
          "\nIncluded Urls: ("
              + new_incls.size()
              + " new, "
              + (m_inclset.size() - new_incls.size())
              + " old)",
          URL_SUMMARY_MESSAGE);
      depth_incl[m_curDepth - 1] += new_incls.size();
    }
    for (Iterator it = new_incls.iterator(); it.hasNext(); ) {
      outputMessage(it.next().toString(), PLAIN_MESSAGE);
    }

    if (!m_exclset.isEmpty()) {
      outputMessage(
          "\nExcluded Urls: ("
              + new_excls.size()
              + " new, "
              + (m_exclset.size() - new_excls.size())
              + " old)",
          URL_SUMMARY_MESSAGE);
    }
    for (Iterator it = new_excls.iterator(); it.hasNext(); ) {
      outputMessage(it.next().toString(), PLAIN_MESSAGE);
    }
    m_reported.addAll(new_incls);
    m_reported.addAll(new_excls);

    if (m_outWriter != null) {
      try {
        m_outWriter.flush();
      } catch (IOException ex) {
      }
    }
    return new_incls;
  }
Code example #14
File: RandomPointset.java  Project: nqnliu/101imp
 // Count # of hulls in a point set
 public int findConvexHulls() {
   Set<Point> mySet = new HashSet<Point>(Arrays.asList(this.getPointSet()));
   Point[] convexhull;
   int numhulls = 0;
    while (!mySet.isEmpty()) {
     convexhull = this.grahamScan(mySet);
     numhulls++;
     for (int i = 0; i < convexhull.length; i++) {
       mySet.remove(convexhull[i]);
     }
   }
   return numhulls;
 }
Code example #15
  /**
   * Indicates whether the specified attribute should be included in the entries read from the LDIF.
   *
   * @param attributeType The attribute type for which to make the determination.
   * @return <CODE>true</CODE> if the specified attribute should be included in the entries read
   *     from the LDIF, or <CODE>false</CODE> if not.
   */
  public boolean includeAttribute(AttributeType attributeType) {
    if (!excludeAttributes.isEmpty() && excludeAttributes.contains(attributeType)) {
      return false;
    }

    if ((excludeAllOpAttrs && attributeType.isOperational())
        || (excludeAllUserAttrs && !attributeType.isOperational())) {
      return false;
    }

    if ((includeAllUserAttrs && !attributeType.isOperational())
        || (includeAllOpAttrs && attributeType.isOperational())) {
      return true;
    }

    if (!includeAttributes.isEmpty()) {
      return includeAttributes.contains(attributeType);
    } else if ((includeAllUserAttrs && attributeType.isOperational())
        || (includeAllOpAttrs && !attributeType.isOperational())) {
      return false;
    }
    return true;
  }
Code example #16
  /**
   * Returns the live methods of a program whose root methods are the <tt>main</tt> method of a set
   * of classes.
   *
   * @param classes Names of classes containing root methods
   * @param context Repository for accessing BLOAT stuff
   * @return The <tt>MemberRef</tt>s of the live methods
   */
  private static Collection liveMethods(final Collection classes, final BloatContext context) {

    // Determine the roots of the call graph
    final Set roots = new HashSet();
    Iterator iter = classes.iterator();
    while (iter.hasNext()) {
      final String className = (String) iter.next();
      try {
        final ClassEditor ce = context.editClass(className);
        final MethodInfo[] methods = ce.methods();

        for (int i = 0; i < methods.length; i++) {
          final MethodEditor me = context.editMethod(methods[i]);

          if (!me.name().equals("main")) {
            continue;
          }

          BloatBenchmark.tr("  Root " + ce.name() + "." + me.name() + me.type());
          roots.add(me.memberRef());
        }

      } catch (final ClassNotFoundException ex1) {
        BloatBenchmark.err.println("** Could not find class: " + ex1.getMessage());
        System.exit(1);
      }
    }

    if (roots.isEmpty()) {
      BloatBenchmark.err.print("** No main method found in classes: ");
      iter = classes.iterator();
      while (iter.hasNext()) {
        final String name = (String) iter.next();
        BloatBenchmark.err.print(name);
        if (iter.hasNext()) {
          BloatBenchmark.err.print(", ");
        }
      }
      BloatBenchmark.err.println("");
    }

    context.setRootMethods(roots);
    final CallGraph cg = context.getCallGraph();

    final Set liveMethods = new TreeSet(new MemberRefComparator());
    liveMethods.addAll(cg.liveMethods());

    return (liveMethods);
  }
Code example #17
  public void testSourcesHaveLicense() throws IOException {
    // get a list of all the files in our source directories
    final List<File> sourceFiles = getSourceFiles();

    // check each source file and add it to the failure set if it doesn't contain the license header comment
    final Set<String> failures = new HashSet<String>();
    for (File src : sourceFiles) {
      if (src.getPath().toLowerCase().endsWith(".java") && !sourceHasLicense(src))
        failures.add(src.getPath());
    }

    // fail if there were failures
    if (!failures.isEmpty())
      fail("the following files do not have the correct license header" + failures);
  }
Code example #18
  public static void main(String[] args) throws IOException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
    String line = null;
    String[] split = null;

    int cases = Integer.valueOf(reader.readLine());
    int N, j, z, min, max, result;

    String matching = null, newMatch = null, tmpLine;
    Set<String> set = new HashSet<String>();
    boolean noResult = false;

    for (int i = 1; i <= cases; i++) {
      N = Integer.valueOf(reader.readLine());
      set = new HashSet<String>();
      noResult = false;
      result = 0;
      min = Integer.MAX_VALUE;
      max = Integer.MIN_VALUE;

      for (z = 0; z < N; z++) {
        line = reader.readLine();
        tmpLine = REGEX_PATTERN.matcher(line).replaceAll("$1");

        if (set.isEmpty()) {
          set.add(tmpLine);
        } else if (!set.contains(tmpLine)) {
          noResult = true;
          break;
        }

        min = Math.min(min, line.length());
        max = Math.max(max, line.length());
      }

      if (!noResult) {
        result = max - min;
      }

      if (noResult) {
        System.out.printf("Case #%d: Fegla Won%n", i);
      } else {
        System.out.printf("Case #%d: %d%n", i, result);
      }
    }
  }
Code example #19
  public Map<String, Object> getMapParams(Long id, Map<String, Object> mapFormatSql) {
    Template tpl = this.templateDao.load(id);
    if (tpl == null) throw new CoreException("template does not exist");

    // Fetch the template's parameter set
    Set<TemplateParam> tplps = tpl.getParams();
    // Handle the case where no template parameters are configured
    if (tplps == null || tplps.isEmpty()) return null;

    Map<String, Object> formattedMap = new HashMap<String, Object>();
    Map<String, Object> tempMap;
    // Iterate over the template parameters and collect the formatted replacement values
    for (TemplateParam tp : tplps) {
      tempMap = templateParamService.getMapParams(tp, mapFormatSql);
      if (tempMap != null) formattedMap.putAll(tempMap);
    }
    return formattedMap.isEmpty() ? null : formattedMap;
  }
Code example #20
  private void writeWeights(
      String orig, GeneBranch from, GeneBranch to, String edgeType, Writer writer)
      throws IOException {
    Set<String> dwstr = to.getAllGenes();
    dwstr.retainAll(downstream.get(orig));
    assert !dwstr.isEmpty();
    double cumPval = calcPVal(orig, dwstr);
    boolean upreg = calcChangeDirection(orig, to.gene);

    String key = from.gene + " " + edgeType + " " + to.gene;
    writer.write("edge\t" + key + "\tcolor\t" + val2Color(cumPval, 0) + "\n");
    writer.write("edge\t" + key + "\twidth\t2\n");

    if (affectedDw.get(orig).contains(to.gene)) {
      double pval = calcPVal(orig, Collections.singleton(to.gene));
      writer.write("node\t" + to.gene + "\tcolor\t" + val2Color(pval, upreg ? 1 : -1) + "\n");
    } else {
      writer.write("node\t" + to.gene + "\tcolor\t255 255 255\n");
    }
  }
Code example #21
  public VocabBuilder(String filename, RDFFormat format) throws IOException, RDFParseException {
    Path file = Paths.get(filename);
    if (!Files.exists(file)) throw new FileNotFoundException(filename);

    if (format == null) {
      format = Rio.getParserFormatForFileName(filename);
      log.trace("detected input format from filename {}: {}", filename, format);
    }

    try (final InputStream inputStream = Files.newInputStream(file)) {
      log.trace("Loading input file");
      model = Rio.parse(inputStream, "", format);
    }

    // import
    Set<Resource> owlOntologies = model.filter(null, RDF.TYPE, OWL.ONTOLOGY).subjects();
    if (!owlOntologies.isEmpty()) {
      setPrefix(owlOntologies.iterator().next().stringValue());
    }
  }
Code example #22
File: NCubeManager.java  Project: ccriderGAIG/n-cube
  /**
   * Fetch all the n-cube names for the given ApplicationID. This API will load all cube records for
   * the ApplicationID (NCubeInfoDtos), and then get the names from them.
   *
   * @return Set<String> n-cube names. If an empty Set is returned, then there are no persisted
   *     n-cubes for the passed in ApplicationID.
   */
  public static Set<String> getCubeNames(ApplicationID appId) {
    Map<String, Object> options = new HashMap<>();
    options.put(SEARCH_ACTIVE_RECORDS_ONLY, true);
    List<NCubeInfoDto> cubeInfos = search(appId, null, null, options);
    Set<String> names = new TreeSet<>();

    for (NCubeInfoDto info : cubeInfos) {
      names.add(info.name);
    }

    if (names.isEmpty()) { // Support tests that load cubes from JSON files...
      // can only be in there as ncubes, not ncubeDtoInfo
      for (Object value : getCacheForApp(appId).values()) {
        if (value instanceof NCube) {
          NCube cube = (NCube) value;
          names.add(cube.getName());
        }
      }
    }
    return new CaseInsensitiveSet<>(names);
  }
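
The example above returns a CaseInsensitiveSet, which is an n-cube utility type. A rough JDK-only equivalent of the case-insensitive, sorted name set can be built with a TreeSet and String.CASE_INSENSITIVE_ORDER, as in this sketch (the cube names are made up):

import java.util.Set;
import java.util.TreeSet;

public class CubeNamesSketch {
  public static void main(String[] args) {
    // Sorted set whose membership and ordering ignore case.
    Set<String> names = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    names.add("RateTable");
    names.add("ratetable");   // duplicate under case-insensitive comparison, not added
    names.add("StateCodes");

    System.out.println(names);                         // [RateTable, StateCodes]
    System.out.println(names.contains("STATECODES"));  // true
  }
}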
Code example #23
    /**
     * Emit Java code for deciding which emit method in the given set applies to an Instruction, and
     * then calling the appropriate method. The method essentially works by recursively partitioning
     * the given set into two smaller pieces until it finds a set with only one element. On each
     * partition, this method generates code for the appropriate operand type or size query, and
     * then calls itself recursively on the two sets resulting from the partition.
     *
     * <p>This method uses split to determine what test to apply, and emitSingleton when it
     * encounters a singleton set.
     *
     * <p>Note that the testsPerformed parameter is not needed to do the recursive splitting; this
     * is passed to emitSingleton to help it generate appropriate error checking for operands.
     *
     * @see #split
     * @see #emitSingleton
     * @param opcode the IA32 opcode being generated
     * @param testsPerformed the set of tests already performed
     * @param level the indentation level for pretty printing
     */
    private void emitSet(String opcode, boolean[][] testsPerformed, int level) {
      if (emitters.isEmpty()) {
        // do nothing
      } else if (isSingleton()) emitSingleton(opcode, testsPerformed, level);
      else {
        SplitRecord rec = split();

        if (DEBUG) {
          for (int i = 0; i < level; i++) System.err.print("  ");
          System.err.println("split of " + opcode + "[" + rec.argument + "] for " + rec.test);
        }

        if (testsPerformed[rec.argument][rec.test.ordinal()]) {
          throw new Error(
              "repeated split of "
                  + opcode
                  + "["
                  + rec.argument
                  + "] for "
                  + rec.test
                  + "\n"
                  + this);
        }

        testsPerformed[rec.argument][rec.test.ordinal()] = true;
        EmitterSet[] splits = makeSplit(rec);
        emitTab(level);
        emit("if (");
        emitTest(rec.argument, rec.test);
        emit(") {\n");
        splits[0].emitSet(opcode, testsPerformed, level + 1);
        emit("\n");
        emitTab(level);
        emit("} else {\n");
        splits[1].emitSet(opcode, testsPerformed, level + 1);
        emitTab(level);
        emit("}\n");
        testsPerformed[rec.argument][rec.test.ordinal()] = false;
      }
    }
Code example #24
  private Duple<CrownOperations.Reason, ISynset> getEstimatedSynonym(
      String targetLemma, Set<String> synonyms, POS pos, String gloss) {

    Counter<ISynset> synsetCounts = new ObjectCounter<ISynset>();

    List<String> lemmasInWn = new ArrayList<String>();
    for (String lemma : synonyms) {
      // Get the WordNet synsets for this lemma, if any exist
      Set<ISynset> senses = WordNetUtils.getSynsets(dict, lemma, pos);
      if (senses.isEmpty()) continue;

      lemmasInWn.add(lemma);
      synsetCounts.countAll(senses);

      // Get the hypernyms of the synset and count their occurrence too
      for (ISynset synset : senses) {
        // Do a sanity check that avoids attaching this Entry if its
        // lemma appears anywhere near the synonyms.  This check
        // potentially has some false positives since we might avoid
        // putting the lemma somewhere valid (in which case it would
        // have more than one valid location) but is used to avoid
        // noisy integration
        if (WordNetUtils.isAlreadyInWordNet(dict, targetLemma, pos, synset)) {
          return null;
        }

        for (ISynsetID hyper : synset.getRelatedSynsets(Pointer.HYPERNYM)) {
          ISynset hyperSyn = dict.getSynset(hyper);
          if (WordNetUtils.isAlreadyInWordNet(dict, targetLemma, pos, hyperSyn)) {
            return null;
          }
          synsetCounts.count(hyperSyn);
        }
      }
    }

    // Return null if we couldn't find any of the lemma's synonyms or
    // hyponyms in WordNet
    if (synsetCounts.items().isEmpty()) return null;

    // If there was only one lemma in this list in WordNet, try comparing
    // the glosses for just that word to find a match
    if (lemmasInWn.size() == 1) {
      double maxScore = 0;
      ISynset best = null;
      String bestGloss = null;
      Set<ISynset> candidateSynonymSynsets = WordNetUtils.getSynsets(dict, lemmasInWn.get(0), pos);
      for (ISynset candidate : candidateSynonymSynsets) {

        String wnExtendedGloss = WordNetUtils.getGlossWithoutExamples(candidate);
        double score = simFunc.compare(gloss, wnExtendedGloss);
        if (maxScore < score) {
          maxScore = score;
          best = candidate;
          bestGloss = wnExtendedGloss;
        }
      }

      CrownOperations.Reason r = new CrownOperations.Reason(getClass());
      r.set("relation_type", "synonym");
      r.set("heuristic", "single-synonym");
      r.set("max_score", maxScore);
      return new Duple<CrownOperations.Reason, ISynset>(r, best);
    } else {
      // Check for whether there were ties in the max
      ISynset mostFreq = synsetCounts.max();
      int mostFreqCount = synsetCounts.getCount(mostFreq);
      List<ISynset> ties = new ArrayList<ISynset>();
      for (ISynset syn : synsetCounts.items()) {
        int c = synsetCounts.getCount(syn);
        if (c == mostFreqCount) ties.add(syn);
      }

      // If there was only one synset that had the maximum count, then we
      // report this
      if (ties.size() == 1) {

        CrownOperations.Reason r = new CrownOperations.Reason(getClass());
        r.set("relation_type", "synonym");
        r.set("heuristic", "unambiguous-max");
        r.set("count", mostFreqCount);
        return new Duple<CrownOperations.Reason, ISynset>(r, mostFreq);
      }
      // Otherwise, we try breaking ties between the synsets using gloss
      // similarity
      else {

        double maxScore = 0;
        ISynset best = null;
        String bestGloss = null;
        for (ISynset candidate : ties) {
          String wnExtendedGloss = WordNetUtils.getGlossWithoutExamples(candidate);
          double score = simFunc.compare(gloss, wnExtendedGloss);
          if (maxScore < score) {
            maxScore = score;
            best = candidate;
            bestGloss = wnExtendedGloss;
          }
        }

        CrownOperations.Reason r = new CrownOperations.Reason(getClass());
        r.set("relation_type", "synonym");
        r.set("heuristic", "tied-synonyms");
        r.set("max_score", maxScore);
        return new Duple<CrownOperations.Reason, ISynset>(r, best);
      }
    }
  }
Code example #25
  /** Generate an assembler for the opt compiler */
  public static void main(String[] args) {
    try {
      out = new FileWriter(System.getProperty("generateToDir") + "/AssemblerOpt.java");
    } catch (IOException e) {
      throw new Error(e);
    }

    emit("package org.jikesrvm.compilers.opt.mir2mc.ia32;\n\n");
    emit("import org.jikesrvm.*;\n\n");
    emit("import org.jikesrvm.compilers.opt.*;\n\n");
    emit("import org.jikesrvm.compilers.opt.ir.*;\n\n");
    emit("import org.jikesrvm.compilers.opt.ir.ia32.*;\n\n");
    emit("import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.*;\n\n");
    emit("import static org.jikesrvm.compilers.opt.OptimizingCompilerException.opt_assert;\n\n");
    emit("\n\n");

    emit("/**\n");
    emit(" *  This class is the automatically-generated assembler for\n");
    emit(" * the optimizing compiler.  It consists of methods that\n");
    emit(" * understand the possible operand combinations of each\n");
    emit(" * instruction type, and how to translate those operands to\n");
    emit(" * calls to the Assember low-level emit method\n");
    emit(" *\n");
    emit(" * It is generated by GenerateAssembler.java\n");
    emit(" *\n");
    emit(" */\n");
    emit("public class AssemblerOpt extends AssemblerBase {\n\n");

    emitTab(1);
    emit("/**\n");
    emitTab(1);
    emit(" * @see org.jikesrvm.ArchitectureSpecific.Assembler\n");
    emitTab(1);
    emit(" */\n");
    emitTab(1);
    emit("public AssemblerOpt(int bcSize, boolean print, IR ir) {\n");
    emitTab(2);
    emit("super(bcSize, print, ir);\n");
    emitTab(1);
    emit("}");
    emit("\n\n");

    Method[] emitters = lowLevelAsm.getDeclaredMethods();
    Set<String> opcodes = getOpcodes(emitters);

    Iterator<String> i = opcodes.iterator();
    while (i.hasNext()) {
      String opcode = (String) i.next();
      setCurrentOpcode(opcode);
      emitTab(1);
      emit("/**\n");
      emitTab(1);
      emit(" *  Emit the given instruction, assuming that\n");
      emitTab(1);
      emit(" * it is a " + currentFormat + " instruction\n");
      emitTab(1);
      emit(" * and has a " + currentOpcode + " operator\n");
      emitTab(1);
      emit(" *\n");
      emitTab(1);
      emit(" * @param inst the instruction to assemble\n");
      emitTab(1);
      emit(" */\n");
      emitTab(1);
      emit("private void do" + opcode + "(Instruction inst) {\n");
      EmitterSet emitter = buildSetForOpcode(emitters, opcode);
      boolean[][] tp = new boolean[4][ArgumentType.values().length];
      emitter.emitSet(opcode, tp, 2);
      emitTab(1);
      emit("}\n\n");
    }

    emitTab(1);
    emit("/**\n");
    emitTab(1);
    emit(" *  The number of instructions emitted so far\n");
    emitTab(1);
    emit(" */\n");
    emitTab(1);
    emit("private int instructionCount = 0;\n\n");

    emitTab(1);
    emit("/**\n");
    emitTab(1);
    emit(" *  Assemble the given instruction\n");
    emitTab(1);
    emit(" *\n");
    emitTab(1);
    emit(" * @param inst the instruction to assemble\n");
    emitTab(1);
    emit(" */\n");
    emitTab(1);
    emit("public void doInst(Instruction inst) {\n");
    emitTab(2);
    emit("instructionCount++;\n");
    emitTab(2);
    emit("resolveForwardReferences(instructionCount);\n");
    emitTab(2);
    emit("switch (inst.getOpcode()) {\n");

    Set<String> emittedOpcodes = new HashSet<String>();

    i = opcodes.iterator();
    while (i.hasNext()) {
      String opcode = i.next();
      Iterator<String> operators = getMatchingOperators(opcode).iterator();
      while (operators.hasNext()) {
        String operator = operators.next();
        emitTab(3);
        emittedOpcodes.add(operator);
        emit("case IA32_" + operator + "_opcode:\n");
      }
      emitTab(4);
      emit("do" + opcode + "(inst);\n");
      emitTab(4);
      emit("break;\n");
    }

    // Special case because doJCC is handwritten to add
    // logic for short-forward branches
    emittedOpcodes.add("JCC");
    emitTab(3);
    emit("case IA32_JCC_opcode:\n");
    emitTab(4);
    emit("doJCC(inst);\n");
    emitTab(4);
    emit("break;\n");

    // Special case because doJMP is handwritten to add
    // logic for short-forward branches
    emittedOpcodes.add("JMP");
    emitTab(3);
    emit("case IA32_JMP_opcode:\n");
    emitTab(4);
    emit("doJMP(inst);\n");
    emitTab(4);
    emit("break;\n");

    // Kludge for IA32_LOCK which needs to call emitLockNextInstruction
    emittedOpcodes.add("LOCK");
    emitTab(3);
    emit("case IA32_LOCK_opcode:\n");
    emitTab(4);
    emit("emitLockNextInstruction();\n");
    emitTab(4);
    emit("break;\n");

    // Kludge for PATCH_POINT
    emitTab(3);
    emit("case IG_PATCH_POINT_opcode:\n");
    emitTab(4);
    emit("emitPatchPoint();\n");
    emitTab(4);
    emit("break;\n");

    // Kludge for LOWTABLESWITCH
    emitTab(3);
    emit("case MIR_LOWTABLESWITCH_opcode:\n");
    emitTab(4);
    emit("doLOWTABLESWITCH(inst);\n");
    emitTab(4);
    emit("// kludge table switches that are unusually long instructions\n");
    emitTab(4);
    emit("instructionCount += MIR_LowTableSwitch.getNumberOfTargets(inst);\n");
    emitTab(4);
    emit("break;\n");

    Set<String> errorOpcodes = getErrorOpcodes(emittedOpcodes);
    if (!errorOpcodes.isEmpty()) {
      i = errorOpcodes.iterator();
      while (i.hasNext()) {
        emitTab(3);
        emit("case IA32_" + i.next() + "_opcode:\n");
      }
      emitTab(4);
      emit(
          "throw new OptimizingCompilerException(inst + \" has unimplemented IA32 opcode (check excludedOpcodes)\");\n");
    }

    emitTab(2);
    emit("}\n");
    emitTab(2);
    emit("inst.setmcOffset( mi );\n");
    emitTab(1);
    emit("}\n\n");

    emit("\n}\n");

    try {
      out.close();
    } catch (IOException e) {
      throw new Error(e);
    }
  }
Code example #26
    public BuildStatus build(final Collection<Module> modules, final Flags flags) {
      boolean incremental = flags.incremental();
      final List<ModuleChunk> chunks = myProjectBuilder.getChunks(flags.tests()).getChunkList();

      for (final ModuleChunk c : chunks) {
        final Set<Module> chunkModules = c.getElements();

        if (!DefaultGroovyMethods.intersect(modules, chunkModules).isEmpty()) {
          final Set<String> removedSources = new HashSet<String>();

          if (incremental) {
            final Set<String> chunkSources = new HashSet<String>();
            final Set<String> outdatedSources = new HashSet<String>();

            for (Module m : chunkModules) {
              final ModuleWrapper mw = getModule(m.getName());

              outdatedSources.addAll(mw.getOutdatedFiles(flags.tests()));
              chunkSources.addAll(mw.getSources(flags.tests()));
              removedSources.addAll(mw.getRemovedFiles(flags.tests()));
            }

            final BuildStatus result =
                iterativeCompile(c, chunkSources, outdatedSources, removedSources, flags);

            incremental = result == BuildStatus.INCREMENTAL;

            if (result == BuildStatus.FAILURE) {
              return result;
            }
          } else {
            new Logger(flags) {
              @Override
              public void log(PrintStream stream) {
                stream.println("Compiling chunk " + c.getName() + " non-incrementally.");
              }
            }.log();

            for (Module m : chunkModules) {
              final ModuleWrapper mw = getModule(m.getName());
              removedSources.addAll(flags.tests() ? mw.getRemovedTests() : mw.getRemovedSources());
            }

            final Set<Module> toClean = new HashSet<Module>();

            for (Module m : chunkModules) {
              if (!cleared.contains(m)) {
                toClean.add(m);
              }
            }

            if (!toClean.isEmpty() && !flags.tests()) {
              builder.clearChunk(new ModuleChunk(toClean), null, ProjectWrapper.this);
              cleared.addAll(toClean);
            }

            final Mappings delta = dependencyMapping.createDelta();
            final Callbacks.Backend deltaCallback = delta.getCallback();

            try {
              builder.buildChunk(c, flags.tests(), null, deltaCallback, ProjectWrapper.this);
            } catch (Exception e) {
              e.printStackTrace();
              return BuildStatus.FAILURE;
            }

            final Set<String> allFiles = new HashSet<String>();

            for (Module m : c.getElements()) {
              final ModuleWrapper module = getModule(m.getName());
              affectedFiles.removeAll(module.getSources(flags.tests()));
              allFiles.addAll(module.getSources(flags.tests()));
            }

            final Collection<File> files = new HashSet<File>();

            for (String f : allFiles) {
              files.add(new File(f));
            }

            dependencyMapping.integrate(delta, files, removedSources);

            for (Module m : chunkModules) {
              Reporter.reportBuildSuccess(m, flags.tests());
            }
          }
        }
      }

      return BuildStatus.INCREMENTAL;
    }
Code example #27
    BuildStatus iterativeCompile(
        final ModuleChunk chunk,
        final Set<String> sources,
        final Set<String> outdated,
        final Set<String> removed,
        final Flags flags) {
      final Collection<String> filesToCompile =
          DefaultGroovyMethods.intersect(affectedFiles, sources);

      if (outdated != null) {
        for (String s : outdated) {
          assert (s != null);
        }

        filesToCompile.addAll(outdated);
      }

      filesToCompile.removeAll(compiledFiles);

      if (!filesToCompile.isEmpty() || removed != null) {
        final Set<String> outputFiles = new HashSet<String>();

        for (String f : filesToCompile) {
          final Set<ClassRepr> classes = dependencyMapping.getClasses(f);

          if (classes != null) {
            for (ClassRepr cr : classes) {
              outputFiles.add(cr.getFileName());
            }
          }
        }

        if (removed != null) {
          for (String f : removed) {
            final Set<ClassRepr> classes = dependencyMapping.getClasses(f);
            if (classes != null) {
              for (ClassRepr cr : classes) {
                outputFiles.add(cr.getFileName());
              }
            }
          }
        }

        if (!outputFiles.isEmpty()) {
          new Logger(flags) {
            @Override
            public void log(PrintStream stream) {
              stream.println("Cleaning output files:");
              logFilePaths(stream, outputFiles);
              stream.println("End of files");
            }
          }.log();

          builder.clearChunk(chunk, outputFiles, ProjectWrapper.this);
        }

        final Mappings delta = dependencyMapping.createDelta();
        final Callbacks.Backend deltaBackend = delta.getCallback();

        new Logger(flags) {
          @Override
          public void log(PrintStream stream) {
            stream.println("Compiling files:");
            logFilePaths(stream, filesToCompile);
            stream.println("End of files");
          }
        }.log();

        boolean buildException = false;

        try {
          builder.buildChunk(
              chunk, flags.tests(), filesToCompile, deltaBackend, ProjectWrapper.this);
        } catch (Exception e) {
          e.printStackTrace();
          buildException = true;
        }

        if (!buildException) {
          compiledFiles.addAll(filesToCompile);
          affectedFiles.removeAll(filesToCompile);

          final Collection<File> files = new HashSet<File>();
          final Collection<File> compiled = new HashSet<File>();

          for (String f : filesToCompile) {
            files.add(new File(f));
          }

          for (String f : compiledFiles) {
            compiled.add(new File(f));
          }

          final Collection<File> affected = new HashSet<File>();

          final boolean incremental =
              dependencyMapping.differentiate(delta, removed, files, compiled, affected);

          for (File a : affected) {
            affectedFiles.add(FileUtil.toSystemIndependentName(a.getAbsolutePath()));
          }

          dependencyMapping.integrate(delta, files, removed);

          if (!incremental) {
            affectedFiles.addAll(sources);
            affectedFiles.removeAll(compiledFiles);

            final BuildStatus result = iterativeCompile(chunk, sources, null, null, flags);

            if (result == BuildStatus.FAILURE) {
              return result;
            }

            return BuildStatus.CONSERVATIVE;
          }

          return iterativeCompile(chunk, sources, null, null, flags);
        } else {
          return BuildStatus.FAILURE;
        }
      } else {
        for (Module m : chunk.getElements()) {
          Reporter.reportBuildSuccess(m, flags.tests());
        }
      }

      return BuildStatus.INCREMENTAL;
    }
Code example #28
  public static boolean doPrepare(
      final Module module, final List<String> errorMessages, final List<String> successMessages) {
    final String pluginName = module.getName();
    final String defaultPath =
        new File(module.getModuleFilePath()).getParent() + File.separator + pluginName;
    final HashSet<Module> modules = new HashSet<Module>();
    PluginBuildUtil.getDependencies(module, modules);
    modules.add(module);
    final Set<Library> libs = new HashSet<Library>();
    for (Module dep : modules) {
      PluginBuildUtil.getLibraries(dep, libs);
    }

    final Map<Module, String> jpsModules = collectJpsPluginModules(module);
    modules.removeAll(jpsModules.keySet());

    final boolean isZip = !libs.isEmpty() || !jpsModules.isEmpty();
    final String oldPath = defaultPath + (isZip ? JAR_EXTENSION : ZIP_EXTENSION);
    final File oldFile = new File(oldPath);
    if (oldFile.exists()) {
      if (Messages.showYesNoDialog(
              module.getProject(),
              DevKitBundle.message("suggest.to.delete", oldPath),
              DevKitBundle.message("info.message"),
              Messages.getInformationIcon())
          == Messages.YES) {
        FileUtil.delete(oldFile);
      }
    }

    final String dstPath = defaultPath + (isZip ? ZIP_EXTENSION : JAR_EXTENSION);
    final File dstFile = new File(dstPath);
    return clearReadOnly(module.getProject(), dstFile)
        && ProgressManager.getInstance()
            .runProcessWithProgressSynchronously(
                new Runnable() {
                  public void run() {

                    final ProgressIndicator progressIndicator =
                        ProgressManager.getInstance().getProgressIndicator();
                    if (progressIndicator != null) {
                      progressIndicator.setText(
                          DevKitBundle.message("prepare.for.deployment.common"));
                      progressIndicator.setIndeterminate(true);
                    }
                    try {
                      File jarFile = preparePluginsJar(module, modules);
                      if (isZip) {
                        processLibrariesAndJpsPlugins(
                            jarFile, dstFile, pluginName, libs, jpsModules, progressIndicator);
                      } else {
                        FileUtil.copy(jarFile, dstFile);
                      }
                      LocalFileSystem.getInstance()
                          .refreshIoFiles(Collections.singleton(dstFile), true, false, null);
                      successMessages.add(
                          DevKitBundle.message(
                              "saved.message", isZip ? 1 : 2, pluginName, dstPath));
                    } catch (final IOException e) {
                      errorMessages.add(e.getMessage() + "\n(" + dstPath + ")");
                    }
                  }
                },
                DevKitBundle.message("prepare.for.deployment", pluginName),
                true,
                module.getProject());
  }
Code example #29
  @Override
  public void emitTuples() {
    if (currentWindowId <= idempotentStorageManager.getLargestRecoveryWindow()) {
      return;
    }

    if (inputStream == null) {
      try {
        if (currentFile != null && offset > 0) {
          // open file resets offset to 0, so this is a way around it.
          int tmpOffset = offset;
          if (fs.exists(new Path(currentFile))) {
            this.inputStream = openFile(new Path(currentFile));
            offset = tmpOffset;
            skipCount = tmpOffset;
          } else {
            currentFile = null;
            offset = 0;
            skipCount = 0;
          }
        } else if (!unfinishedFiles.isEmpty()) {
          retryFailedFile(unfinishedFiles.poll());
        } else if (!pendingFiles.isEmpty()) {
          String newPathString = pendingFiles.iterator().next();
          pendingFiles.remove(newPathString);
          if (fs.exists(new Path(newPathString)))
            this.inputStream = openFile(new Path(newPathString));
        } else if (!failedFiles.isEmpty()) {
          retryFailedFile(failedFiles.poll());
        } else {
          scanDirectory();
        }
      } catch (IOException ex) {
        failureHandling(ex);
      }
    }
    if (inputStream != null) {
      int startOffset = offset;
      String file = currentFile; // current file is reset to null when closed.

      try {
        int counterForTuple = 0;
        while (counterForTuple++ < emitBatchSize) {
          T line = readEntity();
          if (line == null) {
            LOG.info("done reading file ({} entries).", offset);
            closeFile(inputStream);
            break;
          }

          // If skipCount is non zero, then failed file recovery is going on, skipCount is
          // used to prevent already emitted records from being emitted again during recovery.
          // When failed file is open, skipCount is set to the last read offset for that file.
          //
          if (skipCount == 0) {
            offset++;
            emit(line);
          } else {
            skipCount--;
          }
        }
      } catch (IOException e) {
        failureHandling(e);
      }
      // Only record a recovery entry if something was actually emitted from the file.
      if (offset > startOffset) {
        currentWindowRecoveryState.add(new RecoveryEntry(file, startOffset, offset));
      }
    }
  }
Code example #30
    // we don't care about the return value but care about it throwing exception
    public void runMayThrow() throws Exception {
      logger.info(
          String.format(
              "[repair #%s] new session: will sync %s on range %s for %s.%s",
              getName(), repairedNodes(), range, tablename, Arrays.toString(cfnames)));

      if (endpoints.isEmpty()) {
        differencingDone.signalAll();
        logger.info(
            String.format(
                "[repair #%s] No neighbors to repair with on range %s: session completed",
                getName(), range));
        return;
      }

      // Checking all nodes are live
      for (InetAddress endpoint : endpoints) {
        if (!FailureDetector.instance.isAlive(endpoint)) {
          differencingDone.signalAll();
          logger.info(
              String.format(
                  "[repair #%s] Cannot proceed on repair because a neighbor (%s) is dead: session failed",
                  getName(), endpoint));
          return;
        }

        if (Gossiper.instance.getVersion(endpoint) < MessagingService.VERSION_11 && isSequential) {
          logger.info(
              String.format(
                  "[repair #%s] Cannot repair using snapshots as node %s is pre-1.1",
                  getName(), endpoint));
          return;
        }
      }

      AntiEntropyService.instance.sessions.put(getName(), this);
      Gossiper.instance.register(this);
      FailureDetector.instance.registerFailureDetectionEventListener(this);
      try {
        // Create and queue a RepairJob for each column family
        for (String cfname : cfnames) {
          RepairJob job = new RepairJob(cfname);
          jobs.offer(job);
          activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background
        completed.await();
        if (exception == null) {
          logger.info(String.format("[repair #%s] session completed successfully", getName()));
        } else {
          logger.error(
              String.format("[repair #%s] session completed with the following error", getName()),
              exception);
          throw exception;
        }
      } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for repair.");
      } finally {
        // mark this session as terminated
        terminate();
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
      }
    }