Example no. 1
 WALPutBenchmark(
     final HRegion region,
     final HTableDescriptor htd,
     final long numIterations,
     final boolean noSync,
     final int syncInterval,
     final double traceFreq) {
   this.numIterations = numIterations;
   this.noSync = noSync;
   this.syncInterval = syncInterval;
   this.numFamilies = htd.getColumnFamilies().length;
   this.region = region;
   this.htd = htd;
   String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
   if (spanReceivers == null || spanReceivers.isEmpty()) {
     loopSampler = Sampler.NEVER;
   } else {
     if (traceFreq <= 0.0) {
       LOG.warn("Tracing enabled but traceFreq=0.");
       loopSampler = Sampler.NEVER;
     } else if (traceFreq >= 1.0) {
       loopSampler = Sampler.ALWAYS;
       if (numIterations > 1000) {
         LOG.warn(
             "Full tracing of all iterations will produce a lot of data. Be sure your"
                 + " SpanReciever can keep up.");
       }
     } else {
       getConf().setDouble("hbase.sampler.fraction", traceFreq);
       loopSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(getConf()));
     }
   }
 }
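A minimal sketch of the sampler selection above, with no HTrace dependency: the hypothetical forFrequency helper returns never/always decisions at the boundaries and a probability-based decision in between. Class and method names are illustrative only.

import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BooleanSupplier;

public class LoopSamplerSketch {
  // Hypothetical stand-in for choosing between Sampler.NEVER, Sampler.ALWAYS and a
  // probability sampler, as the constructor above does.
  static BooleanSupplier forFrequency(double traceFreq) {
    if (traceFreq <= 0.0) {
      return () -> false;                 // like Sampler.NEVER
    } else if (traceFreq >= 1.0) {
      return () -> true;                  // like Sampler.ALWAYS
    }
    // Probability sampler: trace roughly traceFreq of the iterations.
    return () -> ThreadLocalRandom.current().nextDouble() < traceFreq;
  }

  public static void main(String[] args) {
    BooleanSupplier sampler = forFrequency(0.1);
    int traced = 0;
    for (int i = 0; i < 10_000; i++) {
      if (sampler.getAsBoolean()) {
        traced++;
      }
    }
    System.out.println("Traced roughly 10% of iterations: " + traced);
  }
}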
  @Override
  protected SalesforceException createRestException(ContentExchange httpExchange) {
    // try parsing response according to format
    try {
      if ("json".equals(format)) {
        List<RestError> restErrors =
            objectMapper.readValue(
                httpExchange.getResponseContent(), new TypeReference<List<RestError>>() {});
        return new SalesforceException(restErrors, httpExchange.getResponseStatus());
      } else {
        RestErrors errors = new RestErrors();
        xStream.fromXML(httpExchange.getResponseContent(), errors);
        return new SalesforceException(errors.getErrors(), httpExchange.getResponseStatus());
      }
    } catch (IOException | RuntimeException e) {
      // log and ignore
      String msg = "Unexpected Error parsing " + format + " error response: " + e.getMessage();
      LOG.warn(msg, e);
    }

    // just report HTTP status info
    return new SalesforceException("Unexpected error", httpExchange.getResponseStatus());
  }
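For the JSON branch, a self-contained sketch of deserializing an error array into a typed list with Jackson's TypeReference, assuming jackson-databind is on the classpath; RestError here is a hypothetical stand-in POJO, not the Camel Salesforce class.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;

public class RestErrorParseSketch {
  // Hypothetical error POJO; public fields are picked up by Jackson's default visibility rules.
  public static class RestError {
    public String errorCode;
    public String message;
  }

  public static void main(String[] args) throws Exception {
    String body = "[{\"errorCode\":\"INVALID_SESSION_ID\",\"message\":\"Session expired\"}]";
    ObjectMapper mapper = new ObjectMapper();
    // Same pattern as objectMapper.readValue(..., new TypeReference<List<RestError>>() {}) above.
    List<RestError> errors = mapper.readValue(body, new TypeReference<List<RestError>>() {});
    System.out.println(errors.get(0).errorCode + ": " + errors.get(0).message);
  }
}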
Example no. 3
 @Override
 public String toVersionRange(String version) {
   int digits = ServiceConstants.DEFAULT_VERSION_DIGITS;
   String value =
       classPathResolver.getManifestProperty(ServiceConstants.INSTR_FAB_VERSION_RANGE_DIGITS);
   if (notEmpty(value)) {
     try {
       digits = Integer.parseInt(value);
     } catch (NumberFormatException e) {
       LOG.warn(
           "Failed to parse manifest header "
               + ServiceConstants.INSTR_FAB_VERSION_RANGE_DIGITS
               + " as a number. Got: '"
               + value
               + "' so ignoring it");
     }
     if (digits < 0 || digits > 4) {
       LOG.warn(
           "Invalid value of manifest header "
               + ServiceConstants.INSTR_FAB_VERSION_RANGE_DIGITS
               + " as value "
               + digits
               + " is out of range so ignoring it");
       digits = ServiceConstants.DEFAULT_VERSION_DIGITS;
     }
   }
   return Versions.toVersionRange(version, digits);
 }
Example no. 4
    synchronized void add(Range failedRange) {
      LOG.warn("FailedRange:" + failedRange);
      if (divide != null) {
        LOG.warn(
            "FailedRange:" + failedRange + "  test:" + divide.test + "  pass:" + divide.testPassed);
        if (divide.testPassed) {
          // test range passed
          // other range would be bad. test it
          failedRange = divide.other;
        } else {
          // test range failed
          // other range would be good.
          failedRange = divide.test;
        }
        // reset
        divide = null;
      }

      if (maxSkipRecords == 0 || failedRange.getLength() <= maxSkipRecords) {
        skipRanges.add(failedRange);
      } else {
        // start dividing the range to narrow down the skipped
        // records until maxSkipRecords are met OR all attempts
        // get exhausted
        divide = new Divide(failedRange);
      }
    }
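A rough, self-contained sketch of the narrowing idea described in the comments above, ignoring the test/re-test feedback loop and simply halving ranges until each piece fits under maxSkipRecords (with 0 meaning "accept the whole range"); Range is a hypothetical stand-in (Java 16+ record), not the framework class.

import java.util.ArrayList;
import java.util.List;

public class SkipRangeSketch {
  record Range(long start, long length) {}

  static List<Range> narrow(Range failed, long maxSkipRecords) {
    List<Range> skips = new ArrayList<>();
    List<Range> pending = new ArrayList<>(List.of(failed));
    while (!pending.isEmpty()) {
      Range r = pending.remove(pending.size() - 1);
      if (maxSkipRecords == 0 || r.length() <= maxSkipRecords) {
        skips.add(r);                       // small enough (or unlimited): record as a skip range
      } else {
        long half = r.length() / 2;         // otherwise keep dividing, as Divide does
        pending.add(new Range(r.start(), half));
        pending.add(new Range(r.start() + half, r.length() - half));
      }
    }
    return skips;
  }

  public static void main(String[] args) {
    System.out.println(narrow(new Range(0, 10), 3));
  }
}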
    @Override
    public void run() {
      threadCnt.incrementAndGet();
      try {
        /**
         * If there is nothing in the queue to send, then we send the lastMessage to ensure that the
         * last message was received by the peer. The message could be dropped in case self or the
         * peer shutdown their connection (and exit the thread) prior to reading/processing the last
         * message. Duplicate messages are handled correctly by the peer.
         *
         * <p>If the send queue is non-empty, then we have a more recent message than the one
         * stored in lastMessage. To avoid sending a stale message, we should send the message
         * from the send queue.
         */
        ArrayBlockingQueue<ByteBuffer> bq = queueSendMap.get(sid);
        if (bq == null || isSendQueueEmpty(bq)) {
          ByteBuffer b = lastMessageSent.get(sid);
          if (b != null) {
            LOG.debug("Attempting to send lastMessage to sid=" + sid);
            send(b);
          }
        }
      } catch (IOException e) {
        LOG.error("Failed to send last message. Shutting down thread.", e);
        this.finish();
      }

      try {
        while (running && !shutdown && sock != null) {

          ByteBuffer b = null;
          try {
            ArrayBlockingQueue<ByteBuffer> bq = queueSendMap.get(sid);
            if (bq != null) {
              b = pollSendQueue(bq, 1000, TimeUnit.MILLISECONDS);
            } else {
              LOG.error("No queue of incoming messages for " + "server " + sid);
              break;
            }

            if (b != null) {
              lastMessageSent.put(sid, b);
              send(b);
            }
          } catch (InterruptedException e) {
            LOG.warn("Interrupted while waiting for message on queue", e);
          }
        }
      } catch (Exception e) {
        LOG.warn(
            "Exception when using channel: for id "
                + sid
                + " my id = "
                + self.getId()
                + " error = "
                + e);
      }
      this.finish();
      LOG.warn("Send worker leaving thread");
    }
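A stripped-down sketch of the send-loop pattern above, with no ZooKeeper types: poll the outgoing queue with a timeout, remember the last message sent, and resend it when (re)starting with an empty queue. Class and method names are illustrative; send() is a placeholder.

import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class SendLoopSketch {
  private final ArrayBlockingQueue<ByteBuffer> sendQueue = new ArrayBlockingQueue<>(16);
  private final AtomicReference<ByteBuffer> lastMessageSent = new AtomicReference<>();
  private volatile boolean running = true;

  void send(ByteBuffer b) {
    System.out.println("sent " + b.remaining() + " bytes");
  }

  void runLoop() throws InterruptedException {
    // On (re)connect, resend the last message if nothing newer is queued.
    ByteBuffer last = lastMessageSent.get();
    if (sendQueue.isEmpty() && last != null) {
      send(last);
    }
    while (running) {
      ByteBuffer b = sendQueue.poll(1000, TimeUnit.MILLISECONDS);
      if (b != null) {
        lastMessageSent.set(b);
        send(b);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    SendLoopSketch s = new SendLoopSketch();
    s.sendQueue.offer(ByteBuffer.wrap(new byte[] {1, 2, 3}));
    Thread t = new Thread(() -> {
      try {
        s.runLoop();
      } catch (InterruptedException ignored) {
      }
    });
    t.start();
    Thread.sleep(1500);
    s.running = false;
    t.join();
  }
}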
Example no. 6
  private final void verify() {
    ListContainer list;
    final Iterator<ListContainer> iter = _entries.values().iterator();
    while (iter.hasNext()) {
      list = iter.next();

      for (Entry ent : list.getEntries()) {
        for (Ingredient ing : ent.getIngredients()) {
          if (!verifyIngredient(ing)) {
            LOG.warn(
                "{}: Cannot find ingredient with item ID: {} in list: {}!",
                getClass().getSimpleName(),
                ing.getItemId(),
                list.getListId());
          }
        }
        for (Ingredient ing : ent.getProducts()) {
          if (!verifyIngredient(ing)) {
            LOG.warn(
                "{}: Cannot find product with item ID: {} in list: {}!",
                getClass().getSimpleName(),
                ing.getItemId(),
                list.getListId());
          }
        }
      }
    }
  }
    @Override
    public STATE onHeadersReceived(HttpResponseHeaders headers) throws Exception {
      switch (this.httpStatus) {
        case 301:
        case 302:
          String redirect = headers.getHeaders().getFirstValue("Location");
          if (redirect.contains("protected_redirect=true")) {
            LOG.warn("Abandoning protected account: " + url);
            connections.decrementAndGet();
          } else if (redirect.contains("account/suspended")) {
            LOG.warn("Abandoning suspended account: " + url);
            connections.decrementAndGet();
          } else if (followRedirects) {
            // LOG.warn("Following redirect: " + url);
            crawlURL(
                redirect,
                new TweetFetcherHandler(id, username, redirect, numRetries, followRedirects));
          } else {
            LOG.warn("Abandoning redirect: " + url);
            connections.decrementAndGet();
          }
          return STATE.ABORT;
      }

      return super.onHeadersReceived(headers);
    }
Example no. 8
  @Override
  public void parseDocument(Document doc, File f) {
    try {
      int id = Integer.parseInt(f.getName().replaceAll(".xml", ""));
      int entryId = 1;
      Node att;
      final ListContainer list = new ListContainer(id);

      for (Node n = doc.getFirstChild(); n != null; n = n.getNextSibling()) {
        if ("list".equalsIgnoreCase(n.getNodeName())) {
          att = n.getAttributes().getNamedItem("applyTaxes");
          list.setApplyTaxes((att != null) && Boolean.parseBoolean(att.getNodeValue()));

          att = n.getAttributes().getNamedItem("useRate");
          if (att != null) {
            try {

              list.setUseRate(Double.valueOf(att.getNodeValue()));
              if (list.getUseRate() <= 1e-6) {
                throw new NumberFormatException(
                    "The value cannot be 0"); // threat 0 as invalid value
              }
            } catch (NumberFormatException e) {
              try {
                list.setUseRate(Config.class.getField(att.getNodeValue()).getDouble(Config.class));
              } catch (Exception e1) {
                LOG.warn(
                    "{}: Unable to parse {}", getClass().getSimpleName(), doc.getLocalName(), e1);
                list.setUseRate(1.0);
              }

            } catch (DOMException e) {
              LOG.warn("{}: Unable to parse {}", getClass().getSimpleName(), doc.getLocalName(), e);
            }
          }

          att = n.getAttributes().getNamedItem("maintainEnchantment");
          list.setMaintainEnchantment((att != null) && Boolean.parseBoolean(att.getNodeValue()));

          for (Node d = n.getFirstChild(); d != null; d = d.getNextSibling()) {
            if ("item".equalsIgnoreCase(d.getNodeName())) {
              Entry e = parseEntry(d, entryId++, list);
              list.getEntries().add(e);
            } else if ("npcs".equalsIgnoreCase(d.getNodeName())) {
              for (Node b = d.getFirstChild(); b != null; b = b.getNextSibling()) {
                if ("npc".equalsIgnoreCase(b.getNodeName())) {
                  if (Util.isDigit(b.getTextContent())) {
                    list.allowNpc(Integer.parseInt(b.getTextContent()));
                  }
                }
              }
            }
          }
        }
      }
      _entries.put(id, list);
    } catch (Exception e) {
      LOG.error("{}: Error in file {}", getClass().getSimpleName(), f, e);
    }
  }
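A minimal sketch of the same DOM traversal style using only the JDK: parse a file, walk siblings with getFirstChild()/getNextSibling(), and read attributes by name. The element and attribute names mirror the listing above but are otherwise illustrative.

import java.io.File;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;

public class ListXmlSketch {
  // usage: java ListXmlSketch <file.xml>
  public static void main(String[] args) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance()
        .newDocumentBuilder()
        .parse(new File(args[0]));
    for (Node n = doc.getFirstChild(); n != null; n = n.getNextSibling()) {
      if ("list".equalsIgnoreCase(n.getNodeName())) {
        // Attributes are fetched by name and may be absent, hence the null check.
        Node att = n.getAttributes().getNamedItem("applyTaxes");
        boolean applyTaxes = (att != null) && Boolean.parseBoolean(att.getNodeValue());
        System.out.println("applyTaxes=" + applyTaxes);
        for (Node d = n.getFirstChild(); d != null; d = d.getNextSibling()) {
          if ("item".equalsIgnoreCase(d.getNodeName())) {
            System.out.println("found item entry");
          }
        }
      }
    }
  }
}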
 @Override
 public void run() {
   threadCnt.incrementAndGet();
   try {
     while (running && !shutdown && sock != null) {
       /** Reads the first int to determine the length of the message */
       int length = din.readInt();
       if (length <= 0 || length > PACKETMAXSIZE) {
         throw new IOException("Received packet with invalid packet: " + length);
       }
       /** Allocates a new ByteBuffer to receive the message */
       byte[] msgArray = new byte[length];
       din.readFully(msgArray, 0, length);
       ByteBuffer message = ByteBuffer.wrap(msgArray);
       addToRecvQueue(new Message(message.duplicate(), sid));
     }
   } catch (Exception e) {
     LOG.warn("Connection broken for id " + sid + ", my id = " + self.getId() + ", error = ", e);
   } finally {
     LOG.warn("Interrupting SendWorker");
     sw.finish();
     if (sock != null) {
       closeSocket(sock);
     }
   }
 }
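A self-contained sketch of the length-prefixed framing used in the receive loop above: an int length, validated against a maximum, followed by exactly that many bytes. Names and the size limit are illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FramingSketch {
  static final int PACKET_MAX_SIZE = 1024 * 1024;

  static byte[] readFrame(DataInputStream din) throws IOException {
    // Read the length prefix, reject nonsense values, then read exactly that many bytes.
    int length = din.readInt();
    if (length <= 0 || length > PACKET_MAX_SIZE) {
      throw new IOException("Received packet with invalid length: " + length);
    }
    byte[] msg = new byte[length];
    din.readFully(msg, 0, length);
    return msg;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(bos);
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
    dout.writeInt(payload.length);
    dout.write(payload);
    DataInputStream din = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    System.out.println(new String(readFrame(din), StandardCharsets.UTF_8));
  }
}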
Example no. 10
    public void handle(Event event) {
      if (blockNewEvents) {
        return;
      }
      drained = false;

      /* all this method does is enqueue all the events onto the queue */
      int qSize = eventQueue.size();
      if (qSize != 0 && qSize % 1000 == 0) {
        LOG.info("Size of event-queue is " + qSize);
      }
      int remCapacity = eventQueue.remainingCapacity();
      if (remCapacity < 1000) {
        LOG.warn("Very low remaining capacity in the event-queue: " + remCapacity);
      }
      try {
        eventQueue.put(event);
      } catch (InterruptedException e) {
        if (!stopped) {
          LOG.warn("AsyncDispatcher thread interrupted", e);
        }
        // Need to reset drained flag to true if event queue is empty,
        // otherwise dispatcher will hang on stop.
        drained = eventQueue.isEmpty();
        throw new YarnRuntimeException(e);
      }
    }
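A minimal, runnable sketch of the enqueue-with-capacity-warning pattern above, using a plain ArrayBlockingQueue and System.out in place of the dispatcher's event types and logger; the capacity and thresholds are illustrative.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class EventQueueSketch {
  private static final int WARN_THRESHOLD = 1000;
  private final BlockingQueue<String> eventQueue = new ArrayBlockingQueue<>(5000);

  void handle(String event) throws InterruptedException {
    int qSize = eventQueue.size();
    if (qSize != 0 && qSize % 1000 == 0) {
      System.out.println("Size of event-queue is " + qSize);
    }
    int remCapacity = eventQueue.remainingCapacity();
    if (remCapacity < WARN_THRESHOLD) {
      System.out.println("Very low remaining capacity in the event-queue: " + remCapacity);
    }
    eventQueue.put(event); // blocks only when the queue is completely full
  }

  public static void main(String[] args) throws InterruptedException {
    EventQueueSketch s = new EventQueueSketch();
    for (int i = 0; i < 4002; i++) {
      s.handle("event-" + i);
    }
  }
}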
    @Override
    public void run() {
      try {
        final ProgressEvent status = watchable.requestStatus();
        final ProgressEvent progress =
            new ProgressEvent(
                watchable,
                status.getState(),
                status.getStep(),
                status.getMaxSteps(),
                status.getMessage());

        watchable.getStatusCallback().progress(progress);

        if (ProgressEvent.State.FINISHED == status.getState()) {
          if (LOG.isInfoEnabled()) {
            LOG.info(
                "progresswatch finished for watchable because status is FINISHED: "
                    + watchable); // NOI18N
          }

          if (deregister != null) {
            deregisterDispatcher.submit(deregister);
          }
        } else if (ProgressEvent.State.BROKEN == status.getState()) {
          if (LOG.isWarnEnabled()) {
            LOG.warn(
                "progresswatch finished for watchable because status is BROKEN: "
                    + watchable); // NOI18N
          }

          if (deregister != null) {
            deregisterDispatcher.submit(deregister);
          }
        }
      } catch (final IOException e) {
        retryCount++;
        if (retryCount > MAX_RETRIES) {
          LOG.error("error in status poll: " + watchable, e); // NOI18N

          final ProgressEvent progress = new ProgressEvent(watchable, State.BROKEN, e.getMessage());

          watchable.getStatusCallback().progress(progress);

          if (deregister != null) {
            deregisterDispatcher.submit(deregister);
          }
        } else {
          LOG.warn(
              "error in status poll, retrying (no. "
                  + retryCount
                  + "/"
                  + MAX_RETRIES
                  + "): " // NOI18N
                  + watchable,
              e);
        }
      }
    }
Example no. 12
 /**
  * the command that is run and retried for actually obtaining the lock
  *
  * @return if the command was successful or not
  */
 public boolean execute() throws KeeperException, InterruptedException {
   do {
     if (id == null) {
       long sessionId = zookeeper.getSessionId();
       String prefix = "x-" + sessionId + "-";
       // lets try look up the current ID if we failed
       // in the middle of creating the znode
       findPrefixInChildren(prefix, zookeeper, dir);
       idName = new ZNodeName(id);
     }
     if (id != null) {
       List<String> names = zookeeper.getChildren(dir, false);
       if (names.isEmpty()) {
         LOG.warn(
             "No children in: " + dir + " when we've just created one! Let's recreate it...");
         // lets force the recreation of the id
         id = null;
       } else {
          // let's sort them explicitly (though they do seem to come back in order usually :)
         SortedSet<ZNodeName> sortedNames = new TreeSet<ZNodeName>();
         for (String name : names) {
           sortedNames.add(new ZNodeName(dir + "/" + name));
         }
         ownerId = sortedNames.first().getName();
         SortedSet<ZNodeName> lessThanMe = sortedNames.headSet(idName);
          // a node smaller than ours exists: watch the largest of those to avoid the herd effect
          if (!lessThanMe.isEmpty()) {
           ZNodeName lastChildName = lessThanMe.last();
           lastChildId = lastChildName.getName();
           if (LOG.isDebugEnabled()) {
             LOG.debug("watching less than me node: " + lastChildId);
           }
           Stat stat = zookeeper.exists(lastChildId, new LockWatcher());
           if (stat != null) {
             return Boolean.FALSE;
           } else {
             LOG.warn(
                 "Could not find the" + " stats for less than me: " + lastChildName.getName());
           }
          } else { // no smaller node exists: we acquire the lock and invoke the callback
            if (isOwner()) { // confirm ownership by comparing id and ownerId
             if (callback != null) {
               callback.lockAcquired();
             }
             return Boolean.TRUE;
           }
         }
       }
     }
   } while (id == null);
   return Boolean.FALSE;
 }
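A ZooKeeper-free sketch of the ordering logic in execute(): sequential lock-node names are sorted by their sequence suffix (roughly what ZNodeName does), the smallest entry owns the lock, and every other entry watches only its immediate predecessor, which is what avoids the herd effect. The node names are illustrative.

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class LockOrderSketch {
  // Sort by the trailing sequence number rather than plain string order.
  static long sequenceOf(String znode) {
    return Long.parseLong(znode.substring(znode.lastIndexOf('-') + 1));
  }

  public static void main(String[] args) {
    Comparator<String> bySequence = Comparator.comparingLong(LockOrderSketch::sequenceOf);
    SortedSet<String> sortedNames = new TreeSet<>(bySequence);
    sortedNames.add("/lock/x-42-0000000003");
    sortedNames.add("/lock/x-17-0000000001");
    sortedNames.add("/lock/x-99-0000000002");

    String me = "/lock/x-42-0000000003";
    System.out.println("owner: " + sortedNames.first());
    SortedSet<String> lessThanMe = sortedNames.headSet(me);
    if (lessThanMe.isEmpty()) {
      System.out.println(me + " owns the lock");
    } else {
      // Watch only the immediate predecessor instead of every other contender.
      System.out.println(me + " watches predecessor " + lessThanMe.last());
    }
  }
}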
Example no. 13
    /**
     * The communication thread handles communication with the parent (Task Tracker). It sends
     * progress updates if progress has been made or if the task needs to let the parent know that
     * it's alive. It also pings the parent to see if it's alive.
     */
    public void run() {
      final int MAX_RETRIES = 3;
      int remainingRetries = MAX_RETRIES;
      // get current flag value and reset it as well
      boolean sendProgress = resetProgressFlag();
      while (!taskDone.get()) {
        synchronized (lock) {
          done = false;
        }
        try {
          boolean taskFound = true; // whether TT knows about this task
          // sleep for a bit
          synchronized (lock) {
            if (taskDone.get()) {
              break;
            }
            lock.wait(PROGRESS_INTERVAL);
          }
          if (taskDone.get()) {
            break;
          }
          if (sendProgress) {
            updateCounters();
            attemptReport.setCounters(counters);
            delegate.statusUpdate(taskAttemptId, attemptReport);
          } else
            // send ping
            taskFound = delegate.ping(taskAttemptId);

          // if Task Tracker is not aware of our task ID (probably because it
          // died and
          // came back up), kill ourselves
          if (!taskFound) {
            LOG.warn("Parent died.  Exiting " + taskAttemptId);
            resetDoneFlag();
            System.exit(66);
          }
          sendProgress = resetProgressFlag();
          remainingRetries = MAX_RETRIES;
        } catch (Throwable t) {
          LOG.info("Communication exception: " + StringUtils.stringifyException(t));
          remainingRetries -= 1;
          if (remainingRetries == 0) {
            ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0);
            LOG.warn("Last retry, killing " + taskAttemptId);
            resetDoneFlag();
            System.exit(65);
          }
        }
      }
      // Notify that we are done with the work
      resetDoneFlag();
    }
Example no. 14
 /**
  * Validate against Coordinator XSD file
  *
  * @param xmlContent : Input coordinator xml
  * @throws CoordinatorJobException thrown if unable to validate coordinator xml
  */
 private void validateXml(String xmlContent) throws CoordinatorJobException {
   javax.xml.validation.Schema schema =
       Services.get().get(SchemaService.class).getSchema(SchemaName.COORDINATOR);
   Validator validator = schema.newValidator();
   try {
     validator.validate(new StreamSource(new StringReader(xmlContent)));
   } catch (SAXException ex) {
     LOG.warn("SAXException :", ex);
     throw new CoordinatorJobException(ErrorCode.E0701, ex.getMessage(), ex);
   } catch (IOException ex) {
     LOG.warn("IOException :", ex);
     throw new CoordinatorJobException(ErrorCode.E0702, ex.getMessage(), ex);
   }
 }
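A standalone sketch of the same validation flow using only the JDK's javax.xml.validation API, loading a tiny inline schema instead of going through Oozie's SchemaService; the schema and document here are illustrative.

import java.io.StringReader;
import javax.xml.XMLConstants;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;
import org.xml.sax.SAXException;

public class XsdValidateSketch {
  public static void main(String[] args) throws Exception {
    // Minimal inline schema declaring a single element.
    String xsd =
        "<xs:schema xmlns:xs='http://www.w3.org/2001/XMLSchema'>"
            + "<xs:element name='coordinator-app'/></xs:schema>";
    String xml = "<coordinator-app/>";

    Schema schema =
        SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI)
            .newSchema(new StreamSource(new StringReader(xsd)));
    Validator validator = schema.newValidator();
    try {
      validator.validate(new StreamSource(new StringReader(xml)));
      System.out.println("valid");
    } catch (SAXException ex) {
      System.out.println("invalid: " + ex.getMessage());
    }
  }
}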
Example no. 15
    @Override
    public List<Source> splitIntoBundles(long desiredBundleSizeBytes, PipelineOptions options)
        throws Exception {
      // Users may request a limit on the number of results. We can currently support this by
      // simply disabling parallel reads and using only a single split.
      if (query.hasLimit()) {
        return ImmutableList.of(this);
      }

      long numSplits;
      try {
        numSplits = Math.round(((double) getEstimatedSizeBytes(options)) / desiredBundleSizeBytes);
      } catch (Exception e) {
        // Fallback in case estimated size is unavailable. TODO: fix this, it's horrible.

        // 1. Try Dataflow's numWorkers, which will be 0 for other workers.
        DataflowPipelineWorkerPoolOptions poolOptions =
            options.as(DataflowPipelineWorkerPoolOptions.class);
        if (poolOptions.getNumWorkers() > 0) {
          LOG.warn(
              "Estimated size of unavailable, using the number of workers {}",
              poolOptions.getNumWorkers(),
              e);
          numSplits = poolOptions.getNumWorkers();
        } else {
          // 2. Default to 12 in the unknown case.
          numSplits = 12;
        }
      }

      // If the desiredBundleSize or number of workers results in 1 split, simply return
      // a source that reads from the original query.
      if (numSplits <= 1) {
        return ImmutableList.of(this);
      }

      List<Query> datastoreSplits;
      try {
        datastoreSplits = getSplitQueries(Ints.checkedCast(numSplits), options);
      } catch (IllegalArgumentException | DatastoreException e) {
        LOG.warn("Unable to parallelize the given query: {}", query, e);
        return ImmutableList.of(this);
      }

      ImmutableList.Builder<Source> splits = ImmutableList.builder();
      for (Query splitQuery : datastoreSplits) {
        splits.add(new Source(host, datasetId, splitQuery, namespace));
      }
      return splits.build();
    }
    @Override
    public void processTerminated(ProcessEvent event) {
      LOG.warn("Watcher terminated with exit code " + event.getExitCode());

      myProcessHandler = null;

      try {
        startupProcess(true);
      } catch (IOException e) {
        shutdownProcess();
        LOG.warn(
            "Watcher terminated and attempt to restart has failed. Exiting watching thread.", e);
      }
    }
Example no. 17
 @Override
 public Set<String> get() {
   Set<String> hosts = DBHOSTS.get();
   Set<String> union = Sets.newHashSet();
   Set<String> intersection = Sets.newHashSet(hosts);
   Logs.extreme().debug("ActiveHostSet: universe of db hosts: " + hosts);
   for (String ctx : PersistenceContexts.list()) {
     try {
       Set<String> activeDatabases = Databases.lookup(ctx, 0).getactiveDatabases();
       if (BootstrapArgs.isCloudController()) {
          // GRZE: use Internets.localHostIdentifier() which is static, rather than the
          // Hosts reference as it is stateful
          activeDatabases.add(Internets.localHostIdentifier());
       }
       union.addAll(activeDatabases);
       intersection.retainAll(activeDatabases);
     } catch (Exception ex) {
     }
   }
   Logs.extreme().debug("ActiveHostSet: union of activated db connections: " + union);
   Logs.extreme()
       .debug(
           "ActiveHostSet: intersection of db hosts and activated db connections: "
               + intersection);
   boolean dbVolatile = !hosts.equals(intersection);
   String msg =
       String.format(
           "ActiveHostSet: %-14.14s %s%s%s",
           dbVolatile ? "volatile" : "synchronized",
           hosts,
           dbVolatile ? "!=" : "=",
           intersection);
   if (dbVolatile) {
     if (last.compareAndSet(false, dbVolatile)) {
       LOG.warn(msg);
     } else {
       LOG.debug(msg);
     }
   } else {
     if (last.compareAndSet(true, dbVolatile)) {
       LOG.warn(msg);
     } else {
       Logs.extreme().info(msg);
     }
   }
   return intersection;
 }
Example no. 18
 @Override
 public String getTaskAttemptLogUrl(
     JobConf conf, String taskTrackerHttpAddress, String taskAttemptId)
     throws MalformedURLException {
   if (conf.get("mapreduce.framework.name") != null
       && conf.get("mapreduce.framework.name").equals("yarn")) {
     // if the cluster is running in MR2 mode, return null
     LOG.warn("Can't fetch tasklog: TaskLogServlet is not supported in MR2 mode.");
     return null;
   } else {
      // Was using Hadoop-internal API to get tasklogs; disabled until MAPREDUCE-5857 is fixed.
     LOG.warn("Can't fetch tasklog: TaskLogServlet is not supported in MR1 mode.");
     return null;
   }
 }
    @Override
    public void leaseExpired() {
      LOG.info(
          "Transaction ["
              + this.transactionName
              + "] expired in region ["
              + getRegionInfo().getRegionNameAsString()
              + "]");
      TransactionState s = null;
      synchronized (transactionsById) {
        s = transactionsById.remove(transactionName);
      }
      if (s == null) {
        LOG.warn("Unknown transaction expired " + this.transactionName);
        return;
      }

      switch (s.getStatus()) {
        case PENDING:
          s.setStatus(Status.ABORTED); // Other transactions may have a ref
          break;
        case COMMIT_PENDING:
          LOG.info("Transaction " + s.getTransactionId() + " expired in COMMIT_PENDING state");

          try {
            if (s.getCommitPendingWaits() > MAX_COMMIT_PENDING_WAITS) {
              LOG.info("Checking transaction status in transaction log");
              resolveTransactionFromLog(s);
              break;
            }
            LOG.info("renewing lease and hoping for commit");
            s.incrementCommitPendingWaits();
            String key = Long.toString(s.getTransactionId());
            transactionsById.put(key, s);
            try {
              transactionLeases.createLease(getLeaseId(s.getTransactionId()), this);
            } catch (LeaseStillHeldException e) {
              transactionLeases.renewLease(getLeaseId(s.getTransactionId()));
            }
          } catch (IOException e) {
            throw new RuntimeException(e);
          }

          break;
        default:
          LOG.warn("Unexpected status on expired lease");
      }
    }
Example no. 20
 /**
  * Refresh an action's input and output events.
  *
  * @param coordJob coordinator job bean
  * @param coordAction coordinator action bean
  * @throws Exception thrown if failed to materialize coordinator action
  */
 private void refreshAction(CoordinatorJobBean coordJob, CoordinatorActionBean coordAction)
     throws Exception {
   Configuration jobConf = null;
   try {
     jobConf = new XConfiguration(new StringReader(coordJob.getConf()));
   } catch (IOException ioe) {
     LOG.warn("Configuration parse error. read from DB :" + coordJob.getConf(), ioe);
     throw new CommandException(ErrorCode.E1005, ioe.getMessage(), ioe);
   }
   String jobXml = coordJob.getJobXml();
   Element eJob = XmlUtils.parseXml(jobXml);
   Date actualTime = new Date();
   String actionXml =
       CoordCommandUtils.materializeOneInstance(
           jobId,
           dryrun,
           (Element) eJob.clone(),
           coordAction.getNominalTime(),
           actualTime,
           coordAction.getActionNumber(),
           jobConf,
           coordAction);
   LOG.debug(
       "Refresh Action actionId="
           + coordAction.getId()
           + ", actionXml="
           + XmlUtils.prettyPrint(actionXml).toString());
   coordAction.setActionXml(actionXml);
 }
Example no. 21
 @Override
 public void run() {
   spillLock.lock();
   spillThreadRunning = true;
   try {
     while (true) {
       spillDone.signal();
       while (!spillInProgress) {
         spillReady.await();
       }
       try {
         spillLock.unlock();
         sortAndSpill();
       } catch (Throwable t) {
         LOG.warn("Got an exception in sortAndSpill", t);
         sortSpillException = t;
       } finally {
         spillLock.lock();
         if (bufend < bufstart) {
           bufvoid = kvbuffer.length;
         }
         kvstart = kvend;
         bufstart = bufend;
         spillInProgress = false;
       }
     }
   } catch (InterruptedException e) {
     Thread.currentThread().interrupt();
   } finally {
     spillLock.unlock();
     spillThreadRunning = false;
   }
 }
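A minimal sketch of the lock/condition hand-off the spill thread relies on: the worker waits on one condition until work is flagged, drops the lock while doing the expensive part (as run() does around sortAndSpill()), then re-acquires it to clear the flag and signal completion. All names are illustrative.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class SpillCoordinationSketch {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition workReady = lock.newCondition();
  private final Condition workDone = lock.newCondition();
  private boolean workPending = false;

  void requestWork() {
    lock.lock();
    try {
      workPending = true;
      workReady.signal();
      while (workPending) {
        workDone.awaitUninterruptibly(); // wait until the worker clears the flag
      }
    } finally {
      lock.unlock();
    }
  }

  void workerLoop() throws InterruptedException {
    lock.lock();
    try {
      while (true) {
        while (!workPending) {
          workReady.await();
        }
        lock.unlock(); // do the expensive work outside the lock, like sortAndSpill()
        try {
          System.out.println("spilling...");
        } finally {
          lock.lock();
          workPending = false;
          workDone.signal();
        }
      }
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) throws Exception {
    SpillCoordinationSketch s = new SpillCoordinationSketch();
    Thread worker = new Thread(() -> {
      try {
        s.workerLoop();
      } catch (InterruptedException ignored) {
      }
    });
    worker.setDaemon(true);
    worker.start();
    s.requestWork();
    System.out.println("spill completed");
  }
}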
    @Override
    public void run() {
      Thread current = Thread.currentThread();
      String name = current.getName();
      current.setName(name + "-acceptor-" + _acceptor + "-" + AbstractConnector.this);

      synchronized (AbstractConnector.this) {
        _acceptors[_acceptor] = current;
      }

      try {
        while (isAccepting()) {
          try {
            accept(_acceptor);
          } catch (Throwable e) {
            if (isAccepting()) LOG.warn(e);
            else LOG.debug(e);
          }
        }
      } finally {
        current.setName(name);

        synchronized (AbstractConnector.this) {
          _acceptors[_acceptor] = null;
        }
        _stopping.countDown();
      }
    }
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }
      HColumnDescriptor cfd = selectFamily(selected);
      if (cfd == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        if (selected.getColumnFamilies().length < 2) {
          LOG.info("No enough column families to delete in table " + selected.getTableName());
          return;
        }
        TableName tableName = selected.getTableName();
        LOG.info("Deleting column family: " + cfd + " from table: " + tableName);
        admin.deleteColumnFamily(tableName, cfd.getName());
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        Assert.assertFalse(
            "Column family: " + cfd + " was not deleted", freshTableDesc.hasFamily(cfd.getName()));
        LOG.info("Deleted column family: " + cfd + " from table: " + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example no. 24
 @Override
 public void handleUpstream(final ChannelHandlerContext ctx, final ChannelEvent e)
     throws Exception {
   final MappingHttpRequest request = MappingHttpMessage.extractMessage(e);
   final BaseMessage msg = BaseMessage.extractMessage(e);
   if (msg != null) {
     try {
       final Class<? extends ComponentId> compClass = ComponentMessages.lookup(msg);
       ComponentId compId = ComponentIds.lookup(compClass);
       if (compId.isAlwaysLocal() || Topology.isEnabledLocally(compClass)) {
         ctx.sendUpstream(e);
       } else {
         Handlers.sendRedirect(ctx, e, compClass, request);
       }
     } catch (final NoSuchElementException ex) {
       LOG.warn(
           "Failed to find reverse component mapping for message type: " + msg.getClass());
       ctx.sendUpstream(e);
     } catch (final Exception ex) {
       Logs.extreme().error(ex, ex);
       ctx.sendUpstream(e);
     }
   } else {
     ctx.sendUpstream(e);
   }
 }
Example no. 25
    @Override
    public void run() {
      while (!shutdown) {
        try {
          Thread.sleep(nodeExpiryInterval / 2);

          if (clusterManager.safeMode) {
            // Do nothing but sleep
            continue;
          }

          long now = ClusterManager.clock.getTime();
          for (ClusterNode node : nameToNode.values()) {
            if (now - node.lastHeartbeatTime > nodeExpiryInterval) {
              LOG.warn("Timing out node: " + node.getName());
              clusterManager.nodeTimeout(node.getName());
            }
          }

        } catch (InterruptedException iex) {
          // ignore. if shutting down, while cond. will catch it
          continue;
        }
      }
    }
    @Override
    void perform() throws IOException {
      NamespaceDescriptor selected = selectNamespace(namespaceMap);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        String namespaceName = selected.getName();
        LOG.info("Deleting namespace :" + selected);
        admin.deleteNamespace(namespaceName);
        try {
          if (admin.getNamespaceDescriptor(namespaceName) != null) {
            // the namespace still exists.
            Assert.assertTrue("Namespace: " + selected + " was not deleted", false);
          } else {
            LOG.info("Deleted namespace :" + selected);
          }
        } catch (NamespaceNotFoundException nsnfe) {
          // This is expected result
          LOG.info("Deleted namespace :" + selected);
        }
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyNamespaces();
    }
Example no. 27
    public void run() {
      LOG.info("ClientSessionTimeoutCheckThread started");
      while (!queryMasterStop.get()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          break;
        }
        List<QueryMasterTask> tempTasks = new ArrayList<QueryMasterTask>();
        synchronized (queryMasterTasks) {
          tempTasks.addAll(queryMasterTasks.values());
        }

        for (QueryMasterTask eachTask : tempTasks) {
          if (!eachTask.isStopped()) {
            try {
              long lastHeartbeat = eachTask.getLastClientHeartbeat();
              long time = System.currentTimeMillis() - lastHeartbeat;
              if (lastHeartbeat > 0 && time > querySessionTimeout * 1000) {
                LOG.warn(
                    "Query "
                        + eachTask.getQueryId()
                        + " stopped cause query sesstion timeout: "
                        + time
                        + " ms");
                eachTask.expiredSessionTimeout();
              }
            } catch (Exception e) {
              LOG.error(eachTask.getQueryId() + ":" + e.getMessage(), e);
            }
          }
        }
      }
    }
 @Override
 void perform() throws IOException {
   Admin admin = connection.getAdmin();
   try {
     HTableDescriptor htd = createTableDesc();
     TableName tableName = htd.getTableName();
     if (admin.tableExists(tableName)) {
       return;
     }
     String numRegionKey = String.format(NUM_REGIONS_KEY, this.getClass().getSimpleName());
     numRegions = getConf().getInt(numRegionKey, DEFAULT_NUM_REGIONS);
     byte[] startKey = Bytes.toBytes("row-0000000000");
     byte[] endKey = Bytes.toBytes("row-" + Integer.MAX_VALUE);
     LOG.info("Creating table:" + htd);
     admin.createTable(htd, startKey, endKey, numRegions);
     Assert.assertTrue("Table: " + htd + " was not created", admin.tableExists(tableName));
     HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
     enabledTables.put(tableName, freshTableDesc);
     LOG.info("Created table:" + freshTableDesc);
   } catch (Exception e) {
     LOG.warn("Caught exception in action: " + this.getClass());
     throw e;
   } finally {
     admin.close();
   }
   verifyTables();
 }
Example no. 29
 @Override
 public Project getProject() {
   if (LOG.isEnabledFor(Priority.WARN)) {
     LOG.warn("Using getProject() from IOperationContext strictly prohibited");
   }
   return super.getProject();
 }
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        HColumnDescriptor cfd = createFamilyDesc();
        if (selected.hasFamily(cfd.getName())) {
          LOG.info(
              new String(cfd.getName()) + " already exists in table " + selected.getTableName());
          return;
        }
        TableName tableName = selected.getTableName();
        LOG.info("Adding column family: " + cfd + " to table: " + tableName);
        admin.addColumn(tableName, cfd);
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        Assert.assertTrue(
            "Column family: " + cfd + " was not added", freshTableDesc.hasFamily(cfd.getName()));
        LOG.info("Added column family: " + cfd + " to table: " + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }