Example no. 1
    private Action batch() {
      if (aggregate == null) {
        aggregate = bufferPool.acquire(bufferSize, true);
        if (LOG.isDebugEnabled()) {
          LOG.debug("{} acquired aggregate buffer {}", FrameFlusher.this, aggregate);
        }
      }

      // Do not allocate the iterator here.
      for (int i = 0; i < entries.size(); ++i) {
        FrameEntry entry = entries.get(i);

        entry.generateHeaderBytes(aggregate);

        ByteBuffer payload = entry.frame.getPayload();
        if (BufferUtil.hasContent(payload)) {
          BufferUtil.append(aggregate, payload);
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("{} aggregated {} frames: {}", FrameFlusher.this, entries.size(), entries);
      }
      succeeded();
      return Action.SCHEDULED;
    }
  /**
   * Processes a new connection, making it idle or active depending on whether requests are waiting
   * to be sent.
   *
   * <p>A new connection is created when a request needs to be executed; it is possible that the
   * request that triggered the connection creation is executed by another connection that was just
   * released, so the new connection may become idle.
   *
   * <p>If a request is waiting to be executed, it will be dequeued and executed by the new
   * connection.
   *
   * @param connection the new connection
   */
  public void process(final C connection) {
    HttpClient client = getHttpClient();
    final HttpExchange exchange = getHttpExchanges().poll();
    if (LOG.isDebugEnabled())
      LOG.debug("Processing exchange {} on {} of {}", exchange, connection, this);
    if (exchange == null) {
      if (!connectionPool.release(connection)) connection.close();

      if (!client.isRunning()) {
        if (LOG.isDebugEnabled()) LOG.debug("{} is stopping", client);
        connection.close();
      }
    } else {
      final Request request = exchange.getRequest();
      Throwable cause = request.getAbortCause();
      if (cause != null) {
        if (LOG.isDebugEnabled()) LOG.debug("Aborted before processing {}: {}", exchange, cause);
        // It may happen that the request is aborted before the exchange
        // is created. Aborting the exchange a second time will result in
        // a no-operation, so we just abort here to cover that edge case.
        exchange.abort(cause);
      } else {
        send(connection, exchange);
      }
    }
  }
Example no. 3
    @Override
    public void run() {
      if (LOG.isDebugEnabled()) {
        LOG.debug("==> ConsumerRunnable.run()");
      }
      while (true) {
        try {
          if (hasNext()) {
            EntityNotification notification = consumer.peek();
            if (notification != null) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Notification=" + getPrintableEntityNotification(notification));
              }

              ServiceTags serviceTags =
                  AtlasNotificationMapper.processEntityNotification(notification);
              if (serviceTags == null) {
                LOG.error(
                    "No ServiceTags built for notification :"
                        + getPrintableEntityNotification(notification));
              } else {
                updateSink(serviceTags);
              }
            } else {
              LOG.error("Null entityNotification received from Kafka! Ignoring.");
            }
            // Move iterator forward
            consumer.next();
          }
        } catch (Exception exception) {
          LOG.error("Caught exception..: ", exception);
          return;
        }
      }
    }
Example no. 4
    private void addLeafTask(TaskAttemptToSchedulerEvent event) {
      TaskAttempt taskAttempt = event.getTaskAttempt();
      List<DataLocation> locations = taskAttempt.getTask().getDataLocations();

      for (DataLocation location : locations) {
        String host = location.getHost();
        leafTaskHosts.add(host);

        HostVolumeMapping hostVolumeMapping = leafTaskHostMapping.get(host);
        if (hostVolumeMapping == null) {
          String rack = RackResolver.resolve(host).getNetworkLocation();
          hostVolumeMapping = new HostVolumeMapping(host, rack);
          leafTaskHostMapping.put(host, hostVolumeMapping);
        }
        hostVolumeMapping.addTaskAttempt(location.getVolumeId(), taskAttempt);

        if (LOG.isDebugEnabled()) {
          LOG.debug("Added attempt req to host " + host);
        }

        HashSet<TaskAttemptId> list = leafTasksRackMapping.get(hostVolumeMapping.getRack());
        if (list == null) {
          list = new HashSet<>();
          leafTasksRackMapping.put(hostVolumeMapping.getRack(), list);
        }

        list.add(taskAttempt.getId());

        if (LOG.isDebugEnabled()) {
          LOG.debug("Added attempt req to rack " + hostVolumeMapping.getRack());
        }
      }

      leafTasks.add(taskAttempt.getId());
    }
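
A minimal, self-contained sketch (hypothetical class, with String ids standing in for TaskAttemptId) of the look-up-or-create steps above: on Java 8+ the get/null-check/put sequence used for the rack index collapses into Map.computeIfAbsent without changing behavior.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RackIndexSketch {
  // Same role as leafTasksRackMapping above: rack -> set of task attempt ids.
  private final Map<String, Set<String>> rackToAttempts = new HashMap<>();

  public void addToRack(String rack, String taskAttemptId) {
    // Creates the HashSet only when the rack is seen for the first time.
    rackToAttempts.computeIfAbsent(rack, r -> new HashSet<>()).add(taskAttemptId);
  }

  public static void main(String[] args) {
    RackIndexSketch sketch = new RackIndexSketch();
    sketch.addToRack("/default-rack", "attempt_0001");
    sketch.addToRack("/default-rack", "attempt_0002");
    System.out.println(sketch.rackToAttempts); // one rack entry holding both attempts
  }
}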
    public void run() {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cleaning up expired web clients.");
      }

      synchronized (ajaxWebClients) {
        Iterator<Map.Entry<String, AjaxWebClient>> it = ajaxWebClients.entrySet().iterator();
        while (it.hasNext()) {
          Map.Entry<String, AjaxWebClient> e = it.next();
          String key = e.getKey();
          AjaxWebClient val = e.getValue();
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                "AjaxWebClient "
                    + key
                    + " last accessed "
                    + val.getMillisSinceLastAccessed() / 1000
                    + " seconds ago.");
          }
          // close an expired client and remove it from the ajaxWebClients hash.
          if (val.closeIfExpired()) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Removing expired AjaxWebClient " + key);
            }
            it.remove();
          }
        }
      }
    }
    synchronized boolean finish() {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Calling finish for " + sid);
      }

      if (!running) {
        /*
         * Avoids running finish() twice.
         */
        return running;
      }

      running = false;
      closeSocket(sock);
      // channel = null;

      this.interrupt();
      if (recvWorker != null) {
        recvWorker.finish();
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing entry from senderWorkerMap sid=" + sid);
      }
      senderWorkerMap.remove(sid, this);
      threadCnt.decrementAndGet();
      return running;
    }
Example no. 7
  @Override
  public boolean rename(Path src, Path dst) throws IOException {
    // Attempt rename using Java API.
    File srcFile = pathToFile(src);
    File dstFile = pathToFile(dst);
    if (srcFile.renameTo(dstFile)) {
      return true;
    }

    // Enforce POSIX rename behavior that a source directory replaces an existing
    // destination if the destination is an empty directory.  On most platforms,
    // this is already handled by the Java API call above.  Some platforms
    // (notably Windows) do not provide this behavior, so the Java API call above
    // fails.  Delete destination and attempt rename again.
    if (this.exists(dst)) {
      FileStatus sdst = this.getFileStatus(dst);
      if (sdst.isDirectory() && dstFile.list().length == 0) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting empty destination and renaming " + src + " to " + dst);
        }
        if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
          return true;
        }
      }
    }

    // The fallback behavior accomplishes the rename by a full copy.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Falling through to a copy of " + src + " to " + dst);
    }
    return FileUtil.copy(this, src, this, dst, true, getConf());
  }
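
A usage sketch of the behavior described in the comments above, assuming the method belongs to Hadoop's RawLocalFileSystem (which is what the pathToFile/FileUtil.copy calls suggest) and that it is reachable via FileSystem.getLocal(...).getRawFileSystem(); the /tmp paths are illustrative. Renaming a source directory onto an existing empty destination directory is expected to succeed, either through the plain Java rename or through the delete-and-retry and copy fallbacks.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  public static void main(String[] args) throws Exception {
    // Raw local file system, where a rename(...) like the one above lives.
    FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path src = new Path("/tmp/rename-sketch-src");
    Path dst = new Path("/tmp/rename-sketch-dst");
    fs.mkdirs(src);
    fs.mkdirs(dst); // empty destination directory
    System.out.println("renamed: " + fs.rename(src, dst));
  }
}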
  /**
   * Applies a new point value to the underlying station bean if this editor is editable,
   * selecting the corresponding map feature unless feature changes are locked.
   *
   * @param value the new linear (station) value
   */
  private void setPointValue(final double value) {
    if (isEditable()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("change bean value to " + value);
      }
      final CidsBean pointBean = getCidsBean();
      final double oldValue = LinearReferencingHelper.getLinearValueFromStationBean(pointBean);

      if (oldValue != value) {
        try {
          if (!isFeatureChangeLocked()) {
            MAPPING_COMPONENT.getFeatureCollection().select(getFeature());
          }
          if (!isBeanChangeLocked()) {
            LinearReferencingHelper.setLinearValueToStationBean(
                (double) Math.round(value), pointBean);
          }
        } catch (Exception ex) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("error changing bean", ex);
          }
        }
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("no changes needed, old value was " + oldValue);
        }
      }
    }
  }
  /** Pushes the current spinner text to the point value, guarding against re-entrant spinner changes. */
  private void spinnerChanged() {
    if (LOG.isDebugEnabled()) {
      LOG.debug("spinner changed", new CurrentStackTrace());
    }

    try {
      lockSpinnerChange(true);

      final AbstractFormatter formatter =
          ((JSpinner.DefaultEditor) getValueSpinner().getEditor()).getTextField().getFormatter();
      final String text =
          ((JSpinner.DefaultEditor) getValueSpinner().getEditor()).getTextField().getText();
      if (!text.isEmpty()) {
        try {
          setPointValue((Double) formatter.stringToValue(text));
        } catch (ParseException ex) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("error parsing spinner", ex);
          }
        }
      }
    } finally {
      lockSpinnerChange(false);
    }
  }
Example no. 10
    @Override
    public Void call() throws IOException {
      IOException toThrow = null;
      StatisticsCollectionRunTracker collectionTracker =
          StatisticsCollectionRunTracker.getInstance(config);
      final HRegionInfo regionInfo = region.getRegionInfo();
      try {
        // update the statistics table
        // Just verify if this is fine
        ArrayList<Mutation> mutations = new ArrayList<Mutation>();

        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Deleting the stats for the region "
                  + regionInfo.getRegionNameAsString()
                  + " as part of major compaction");
        }
        stats.deleteStats(region, tracker, family, mutations);
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Adding new stats for the region "
                  + regionInfo.getRegionNameAsString()
                  + " as part of major compaction");
        }
        stats.addStats(tracker, family, mutations);
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Committing new stats for the region "
                  + regionInfo.getRegionNameAsString()
                  + " as part of major compaction");
        }
        stats.commitStats(mutations);
      } catch (IOException e) {
        LOG.error("Failed to update statistics table!", e);
        toThrow = e;
      } finally {
        try {
          collectionTracker.removeCompactingRegion(regionInfo);
          stats.close(); // close the writer
          tracker.close(); // close the tracker
        } catch (IOException e) {
          if (toThrow == null) toThrow = e;
          LOG.error("Error while closing the stats table", e);
        } finally {
          // close the delegate scanner
          try {
            delegate.close();
          } catch (IOException e) {
            if (toThrow == null) toThrow = e;
            LOG.error("Error while closing the scanner", e);
          } finally {
            if (toThrow != null) {
              throw toThrow;
            }
          }
        }
      }
      return null;
    }
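
For comparison, a minimal sketch of the same close-everything-but-keep-the-first-failure idea using try-with-resources; Stats, Tracker and Scanner are hypothetical stand-ins for the objects closed in the nested finally blocks above, and suppressed exceptions replace the hand-maintained toThrow variable.

import java.io.Closeable;
import java.io.IOException;

public class CloseAllSketch {
  interface Stats extends Closeable {}
  interface Tracker extends Closeable {}
  interface Scanner extends Closeable {}

  static void updateStats(Stats stats, Tracker tracker, Scanner delegate) throws IOException {
    // Resources close in reverse declaration order (stats, tracker, delegate),
    // mirroring the finally blocks above; the first IOException propagates and
    // later close() failures are attached as suppressed exceptions.
    try (Scanner s = delegate;
        Tracker t = tracker;
        Stats st = stats) {
      // ... delete, add and commit the stats here ...
    }
  }
}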
Example no. 11
 @Override
 public int update(AmPermission amPermission) {
   if (LOG.isDebugEnabled()) {
     LOG.debug(">>>>parameter:" + amPermission);
   }
   int result =
       getSqlSession()
           .update("org.framework.authorize.auth.mapper.AmPermissionMapper.update", amPermission);
   if (LOG.isDebugEnabled()) {
     LOG.debug(">>>>result:" + result);
   }
   return result;
 }
Example no. 12
 @Override
 public AmPermission getById(String permissionId) {
   if (LOG.isDebugEnabled()) {
     LOG.debug(">>>>parameter:" + permissionId);
   }
   AmPermission result =
       getSqlSession()
           .selectOne(
               "org.framework.authorize.auth.mapper.AmPermissionMapper.getById", permissionId);
   if (LOG.isDebugEnabled()) {
     LOG.debug(">>>>result:" + result);
   }
   return result;
 }
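
The two DAO methods above repeat the same guard-and-concatenate pattern. A minimal sketch, assuming the logger is SLF4J (the snippets do not show its type) and using a hypothetical class name: parameterized placeholders defer message formatting until DEBUG is actually enabled, so the explicit isDebugEnabled() guards are only worth keeping when building an argument is itself expensive.

import org.apache.ibatis.session.SqlSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AmPermissionDaoSketch {
  private static final Logger LOG = LoggerFactory.getLogger(AmPermissionDaoSketch.class);

  // Same update as above, but the SqlSession is passed in rather than taken
  // from a DAO base class, and the debug lines use {} placeholders.
  public int update(SqlSession session, Object amPermission) {
    LOG.debug(">>>>parameter: {}", amPermission);
    int result = session.update(
        "org.framework.authorize.auth.mapper.AmPermissionMapper.update", amPermission);
    LOG.debug(">>>>result: {}", result);
    return result;
  }
}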
    public boolean addResource(String url, String origin, String referrer) {
      // We start the push period here and not when initializing the main resource, because a
      // browser with a prefilled cache won't request the subresources. If a browser with a
      // warmed-up cache hits the main resource after a server restart, the push period shouldn't
      // start until the first subresource is requested.
      firstResourceAdded.compareAndSet(-1, System.nanoTime());

      long delay = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - firstResourceAdded.get());
      if (!referrer.startsWith(origin) && !isPushOriginAllowed(origin)) {
        if (LOG.isDebugEnabled())
          LOG.debug(
              "Skipped store of push metadata {} for {}: Origin: {} doesn't match or origin not allowed",
              url,
              name,
              origin);
        return false;
      }

      // This check is not strictly concurrency-safe, but it still limits the number of
      // associated resources; in rare cases a few more resources may be stored.
      if (resources.size() >= maxAssociatedResources) {
        if (LOG.isDebugEnabled())
          LOG.debug(
              "Skipped store of push metadata {} for {}: max associated resources ({}) reached",
              url,
              name,
              maxAssociatedResources);
        return false;
      }
      if (delay > referrerPushPeriod) {
        if (LOG.isDebugEnabled())
          LOG.debug(
              "Delay: {}ms longer than referrerPushPeriod ({}ms). Not adding resource: {} for: {}",
              delay,
              referrerPushPeriod,
              url,
              name);
        return false;
      }

      if (LOG.isDebugEnabled()) LOG.debug("Adding: {} to: {} with delay: {}ms.", url, this, delay);
      resources.add(url);
      return true;
    }
Example no. 14
    /**
     * Remove the node from the runnable indices
     *
     * @param node node to remove
     */
    public void deleteRunnable(ClusterNode node) {
      String host = node.getHost();

      if (LOG.isDebugEnabled()) {
        LOG.debug(node.getName() + " deleted from runnable list for type: " + type);
      }

      NodeContainer nodeContainer = hostToRunnableNodes.get(host);
      if (nodeContainer != null) {
        synchronized (nodeContainer) {
          if (nodeContainer.removeNode(node)) {
            /*
             * We do not remove the nodeContainer from the runnable nodes map, since it is the
             * object we synchronize on for operations on the runnable indices.
             */
            hostsWithRunnableNodes.decrementAndGet();
          }
        }
      }

      Node rack = node.hostNode.getParent();

      nodeContainer = rackToRunnableNodes.get(rack);
      if (nodeContainer != null) {
        synchronized (nodeContainer) {
          /*
           * We do not remove the nodeContainer from the runnable nodes map, since it is the
           * object we synchronize on for operations on the runnable indices.
           */
          nodeContainer.removeNode(node);
        }
      }
    }
    public void run() {
      zkLeader.start();
      zkLeader.waitToBecomeLeader();
      isMaster = true;

      while (!stopped) {
        long now = EnvironmentEdgeManager.currentTime();

        // clear any expired
        removeExpiredKeys();

        if (lastKeyUpdate + keyUpdateInterval < now) {
          // roll a new master key
          rollCurrentKey();
        }

        try {
          Thread.sleep(5000);
        } catch (InterruptedException ie) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Interrupted waiting for next update", ie);
          }
        }
      }
    }
  /**
   * Checks whether 'lookupreturn' is specified when a lookupclass is selected.
   *
   * @param maintenanceDocument the maintenance document being validated
   * @return true if the check passes, false otherwise
   */
  private boolean checkLookupReturn(MaintenanceDocument maintenanceDocument) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "New maintainable is: " + maintenanceDocument.getNewMaintainableObject().getClass());
    }
    ProposalColumnsToAlter newEditableProposalField =
        (ProposalColumnsToAlter) maintenanceDocument.getNewMaintainableObject().getDataObject();

    if (StringUtils.isNotBlank(newEditableProposalField.getLookupClass())) {
      GlobalVariables.getUserSession()
          .addObject(
              Constants.LOOKUP_CLASS_NAME, (Object) newEditableProposalField.getLookupClass());
    }
    if (StringUtils.isNotBlank(newEditableProposalField.getLookupClass())
        && StringUtils.isBlank(newEditableProposalField.getLookupReturn())) {
      GlobalVariables.getMessageMap()
          .putError(
              Constants.PROPOSAL_EDITABLECOLUMN_LOOKUPRETURN,
              RiceKeyConstants.ERROR_REQUIRED,
              new String[] {"Lookup Return"});
      return false;
    }

    return true;
  }
 void start() throws InterruptedException {
   if (dependencies.size() > 0) {
     synchronized (this) {
       while (!canStart) {
         this.wait(1000 * 60 * 3L);
         if (dependenciesFailed) {
           throw new TezUncheckedException(
               "Skipping service start for "
                   + service.getName()
                   + " as dependencies failed to start");
         }
       }
     }
   }
   if (LOG.isDebugEnabled()) {
     LOG.debug("Service: " + service.getName() + " trying to start");
   }
   for (Service dependency : dependencies) {
     if (!dependency.isInState(Service.STATE.STARTED)) {
       LOG.info(
           "Service: "
               + service.getName()
               + " not started because "
               + " service: "
               + dependency.getName()
               + " is in state: "
               + dependency.getServiceState());
       return;
     }
   }
   service.start();
 }
Example no. 18
  /**
   * Checks that the budget category code exists in the BudgetCategory table.
   *
   * @param maintenanceDocument the maintenance document being validated
   * @return true if the budget category code exists, false otherwise
   */
  private boolean checkExistence(MaintenanceDocument maintenanceDocument) {

    boolean valid = true;
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "new maintainable is: " + maintenanceDocument.getNewMaintainableObject().getClass());
    }
    // Shared by BudgetCategoryMapping and CostElement.
    // TODO: refactor this; there must be a better way to handle the sharing.
    String budgetCategoryCode;
    if (maintenanceDocument.getNewMaintainableObject().getDataObject()
        instanceof BudgetCategoryMapping) {
      BudgetCategoryMapping budgetCategoryMapping =
          (BudgetCategoryMapping) maintenanceDocument.getNewMaintainableObject().getDataObject();
      budgetCategoryCode = budgetCategoryMapping.getBudgetCategoryCode();
    } else {
      CostElement costElement =
          (CostElement) maintenanceDocument.getNewMaintainableObject().getDataObject();
      budgetCategoryCode = costElement.getBudgetCategoryCode();
    }
    Map<String, String> pkMap = new HashMap<>();
    pkMap.put("code", budgetCategoryCode);
    valid = checkExistenceFromTable(BudgetCategory.class, pkMap, "code", "Budget Category");

    return valid;
  }
 /**
  *
  *
  * <pre>
  * Region-unittest,\x82\xB4\x85\xC2\x7F\xFF\xFE\xB6\xC9jNG\xEE!\x5C3\xBB\xAE\xA1:\x05\xA5\xA9x\xB0\xA1"8\x05\xFB(\xD2VY\xDB\x9A\x06\x09\xA9\x98\xC2\xE3\x8D=,1413960230654.aaf2a6c9f2c87c196f43497243bb2424.
  * RegionID-unittest,1413960230654
  * </pre>
  */
 protected String getLogHeader() {
   HRegion region = this.getCurrentRegion();
   return LOG.isDebugEnabled()
       ? String.format("Region-%s", region.getRegionNameAsString())
       : String.format(
           "Region-%s,%d", region.getTableDesc().getNameAsString(), region.getRegionId());
 }
    @Override
    public ByteBuffer next() {
      try {
        if (channel == null) {
          channel = Files.newByteChannel(filePath, StandardOpenOption.READ);
          LOG.debug("Opened file {}", filePath);
        }

        buffer.clear();
        int read = channel.read(buffer);
        if (read < 0) throw new NoSuchElementException();

        if (LOG.isDebugEnabled()) LOG.debug("Read {} bytes from {}", read, filePath);

        position += read;

        if (!hasNext()) close();

        buffer.flip();
        return buffer;
      } catch (NoSuchElementException x) {
        close();
        throw x;
      } catch (Exception x) {
        close();
        throw (NoSuchElementException) new NoSuchElementException().initCause(x);
      }
    }
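
A self-contained JDK-only sketch of the clear/read/flip cycle the iterator above performs; the 4096-byte buffer and the class name are illustrative, and end-of-file simply ends the loop instead of surfacing as NoSuchElementException.

import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ChunkedFileRead {
  public static void main(String[] args) throws Exception {
    Path filePath = Paths.get(args[0]);
    ByteBuffer buffer = ByteBuffer.allocate(4096); // illustrative chunk size
    long position = 0;
    try (SeekableByteChannel channel = Files.newByteChannel(filePath, StandardOpenOption.READ)) {
      while (true) {
        buffer.clear();                 // reset for the next read
        int read = channel.read(buffer);
        if (read < 0) break;            // end of file
        position += read;
        buffer.flip();                  // expose the bytes just read
        // consume buffer.remaining() bytes here
      }
    }
    System.out.println("Read " + position + " bytes from " + filePath);
  }
}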
 protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission) throws IOException {
   if (permission == null) {
     return p2f.mkdir();
   } else {
     if (Shell.WINDOWS && NativeIO.isAvailable()) {
       try {
         NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
         return true;
       } catch (IOException e) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               String.format(
                   "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
                   p2f, permission.toShort()),
               e);
         }
         return false;
       }
     } else {
       boolean b = p2f.mkdir();
       if (b) {
         setPermission(p, permission);
       }
       return b;
     }
   }
 }
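
For comparison with the mkdir-then-setPermission branch above, a JDK-only sketch that creates a single directory together with its mode on POSIX file systems; the path and permission string are illustrative, and this is not the Hadoop/NativeIO code path.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class MkdirWithModeSketch {
  public static void main(String[] args) throws Exception {
    Path dir = Paths.get("/tmp/mkdir-mode-sketch"); // illustrative path
    Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwxr-x---");
    FileAttribute<Set<PosixFilePermission>> mode = PosixFilePermissions.asFileAttribute(perms);
    // Creates the directory with the requested permissions in one call
    // (subject to the process umask on some platforms).
    Files.createDirectory(dir, mode);
  }
}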
  @Override
  public void exchangeTerminated(HttpExchange exchange, Result result) {
    super.exchangeTerminated(exchange, result);

    Response response = result.getResponse();
    HttpFields responseHeaders = response.getHeaders();

    String closeReason = null;
    if (result.isFailed()) closeReason = "failure";
    else if (receiver.isShutdown()) closeReason = "server close";

    if (closeReason == null) {
      if (response.getVersion().compareTo(HttpVersion.HTTP_1_1) < 0) {
        // HTTP 1.0 must close the connection unless it has
        // an explicit keep alive or it's a CONNECT method.
        boolean keepAlive =
            responseHeaders.contains(HttpHeader.CONNECTION, HttpHeaderValue.KEEP_ALIVE.asString());
        boolean connect = HttpMethod.CONNECT.is(exchange.getRequest().getMethod());
        if (!keepAlive && !connect) closeReason = "http/1.0";
      } else {
        // HTTP 1.1 or greater closes only if it has an explicit close.
        if (responseHeaders.contains(HttpHeader.CONNECTION, HttpHeaderValue.CLOSE.asString()))
          closeReason = "http/1.1";
      }
    }

    if (closeReason != null) {
      if (LOG.isDebugEnabled()) LOG.debug("Closing, reason: {} - {}", closeReason, connection);
      connection.close();
    } else {
      if (response.getStatus() == HttpStatus.SWITCHING_PROTOCOLS_101) connection.remove();
      else release();
    }
  }
Example no. 23
    /** Evaluate using classic parameter binding using the pre compute expression */
    private Object evaluateParameterBinding(
        Exchange exchange, Expression expression, int index, Class<?> parameterType) {
      Object answer = null;

      // use object first to avoid type conversion so we know if there is a value or not
      Object result = expression.evaluate(exchange, Object.class);
      if (result != null) {
        try {
          if (parameterType.isInstance(result)) {
            // optimize if the value is already the same type
            answer = result;
          } else {
            // we got a value now try to convert it to the expected type
            answer =
                exchange.getContext().getTypeConverter().mandatoryConvertTo(parameterType, result);
          }
          if (LOG.isTraceEnabled()) {
            LOG.trace(
                "Parameter #{} evaluated as: {} type: {}",
                new Object[] {index, answer, ObjectHelper.type(answer)});
          }
        } catch (NoTypeConversionAvailableException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                "Cannot convert from type: {} to type: {} for parameter #{}",
                new Object[] {ObjectHelper.type(result), parameterType, index});
          }
          throw new ParameterBindingException(e, method, index, parameterType, result);
        }
      } else {
        LOG.trace("Parameter #{} evaluated as null", index);
      }

      return answer;
    }
  /*
   * check if custom attribute id is valid
   */
  private boolean checkCustomAttributeExist(MaintenanceDocument maintenanceDocument) {

    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "new maintainable is: " + maintenanceDocument.getNewMaintainableObject().getClass());
    }
    CustomAttributeDocument newCustomAttributeDocument =
        (CustomAttributeDocument) maintenanceDocument.getNewMaintainableObject().getDataObject();

    if (newCustomAttributeDocument.getCustomAttributeId() != null) {
      Map<String, String> queryMap = new HashMap<String, String>();
      queryMap.put("id", newCustomAttributeDocument.getCustomAttributeId().toString());

      if (getBoService().countMatching(CustomAttribute.class, queryMap) == 0) {
        GlobalVariables.getMessageMap()
            .putError(
                Constants.DOCUMENT_NEWMAINTAINABLEOBJECT_CUSTOM_ATTRIBUTE_ID,
                KeyConstants.ERROR_INVALID_CUSTOM_ATT_ID,
                new String[] {newCustomAttributeDocument.getCustomAttributeId().toString()});
        return false;
      }
    }

    return true;
  }
Example no. 25
  @Override
  protected void onStateChanged() {
    boolean isLeader = isLeader();
    boolean hasLeader = hasLeader();
    changeSupport.onLeadershipChange(isLeader, hasLeader);
    treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

    // If this actor is no longer the leader close all the transaction chains
    if (!isLeader) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
            persistenceId(),
            getId());
      }

      store.closeAllTransactionChains();

      commitCoordinator.abortPendingTransactions(
          "The transaction was aborted due to an inflight leadership change.", this);
    }

    if (hasLeader && !isIsolatedLeader()) {
      messageRetrySupport.retryMessages();
    }
  }
  @Override
  public void beansDropped(final ArrayList<CidsBean> beans) {
    if (isEditable()) {
      CidsBean routeBean = null;
      for (final CidsBean bean : beans) {
        if (bean.getMetaObject().getMetaClass().getName().equals(CN_ROUTE)) {
          if ((getDropBehavior() == null) || getDropBehavior().checkForAdding(routeBean)) {
            routeBean = bean;
            setChangedSinceDrop(false);
          }
          double value = 0d;

          if (isFirstStationInCurrentBB()) {
            value = getPointInCurrentBB(routeBean);
          }

          setCidsBean(LinearReferencingHelper.createStationBeanFromRouteBean(routeBean, value));
          if (isAutoZoomActivated) {
            MapUtil.zoomToFeatureCollection(getZoomFeatures());
          }
          return;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("no route found in dropped objects");
      }
    }
  }
Example no. 27
    private Parser detectParser(Record record) {
      if (!hasAtLeastOneMimeType(record)) {
        return null;
      }
      String mediaTypeStr =
          (String)
              record.getFirstValue(Fields.ATTACHMENT_MIME_TYPE); // ExtractingParams.STREAM_TYPE);
      assert mediaTypeStr != null;

      MediaType mediaType = parseMediaType(mediaTypeStr).getBaseType();
      Parser parser = mediaTypeToParserMap.get(mediaType); // fast path
      if (parser != null) {
        return parser;
      }
      // wildcard matching
      for (Map.Entry<MediaType, Parser> entry : mediaTypeToParserMap.entrySet()) {
        if (isMediaTypeMatch(mediaType, entry.getKey())) {
          return entry.getValue();
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "No supported MIME type parser found for "
                + Fields.ATTACHMENT_MIME_TYPE
                + "="
                + mediaTypeStr);
      }
      return null;
    }
Example no. 28
    /* Initiates a call by sending the parameter to the remote server.
     * Note: this is not called from the Connection thread, but by other
     * threads.
     */
    protected void sendParam(Call call) {
      if (shouldCloseConnection.get()) {
        return;
      }

      // For serializing the data to be written.

      final DataOutputBuffer d = new DataOutputBuffer();
      try {
        if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id);

        d.writeInt(0xdeadbeef); // placeholder for data length
        d.writeInt(call.id);
        call.param.write(d);
        byte[] data = d.getData();
        int dataLength = d.getLength();
        // fill in the placeholder
        Bytes.putInt(data, 0, dataLength - 4);
        //noinspection SynchronizeOnNonFinalField
        synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC
          out.write(data, 0, dataLength);
          out.flush();
        }
      } catch (IOException e) {
        markClosed(e);
      } finally {
        // the buffer is just an in-memory buffer, but it is still polite to
        // close early
        IOUtils.closeStream(d);
      }
    }
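
A JDK-only sketch of the length-prefix framing performed above, with the 0xdeadbeef placeholder back-patched to the payload length; the class and method names are illustrative, and plain java.io stands in for DataOutputBuffer and Bytes.putInt.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class CallFramingSketch {
  // Builds [length][call id][param bytes]; the length covers everything after
  // the 4-byte length field itself, matching Bytes.putInt(data, 0, dataLength - 4).
  static byte[] frame(int callId, byte[] param) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(bytes);
    d.writeInt(0xdeadbeef); // placeholder for data length
    d.writeInt(callId);
    d.write(param);
    d.flush();
    byte[] data = bytes.toByteArray();
    int payloadLength = data.length - 4;
    // Back-patch the placeholder in big-endian order, as DataOutputStream wrote it.
    data[0] = (byte) (payloadLength >>> 24);
    data[1] = (byte) (payloadLength >>> 16);
    data[2] = (byte) (payloadLength >>> 8);
    data[3] = (byte) payloadLength;
    return data;
  }

  public static void main(String[] args) throws IOException {
    byte[] framed = frame(42, new byte[] {1, 2, 3});
    int prefix = ((framed[0] & 0xff) << 24) | ((framed[1] & 0xff) << 16)
        | ((framed[2] & 0xff) << 8) | (framed[3] & 0xff);
    System.out.println("length prefix = " + prefix); // 7 (4 bytes of call id + 3 of param)
  }
}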
Example no. 29
    @Override
    protected Action process() throws Exception {
      // Only return if EOF has previously been read and thus
      // a write done with EOF=true
      if (_eof) {
        if (LOG.isDebugEnabled()) LOG.debug("EOF of {}", this);
        // Handle EOF
        _in.close();
        closed();
        _channel.getByteBufferPool().release(_buffer);
        return Action.SUCCEEDED;
      }

      // Read until buffer full or EOF
      int len = 0;
      while (len < _buffer.capacity() && !_eof) {
        int r = _in.read(_buffer.array(), _buffer.arrayOffset() + len, _buffer.capacity() - len);
        if (r < 0) _eof = true;
        else len += r;
      }

      // write what we have
      _buffer.position(0);
      _buffer.limit(len);
      write(_buffer, _eof, this);
      return Action.SCHEDULED;
    }
Example no. 30
  @Override
  public void onReceiveRecover(final Object message) throws Exception {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "{}: onReceiveRecover: Received message {} from {}",
          persistenceId(),
          message.getClass().toString(),
          getSender());
    }

    if (message instanceof RecoveryFailure) {
      LOG.error(
          "{}: Recovery failed because of this cause",
          persistenceId(),
          ((RecoveryFailure) message).cause());

      // Even though recovery failed, we still need to finish our recovery, eg send the
      // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
      onRecoveryComplete();
    } else {
      super.onReceiveRecover(message);
      if (LOG.isTraceEnabled()) {
        appendEntriesReplyTracker.begin();
      }
    }
  }