Example #1
 private synchronized void checkQuorumWhenAdded(final String nodeID, final long start) {
   if (clusterMap.containsKey(nodeID)) {
     checkQuorum();
   } else {
     vertx.setTimer(
         200,
         tid -> {
           // This can block on a monitor so it needs to run as a worker
           vertx.executeBlockingInternal(
               () -> {
                 if (System.currentTimeMillis() - start > 10000) {
                   log.warn("Timed out waiting for group information to appear");
                 } else if (!stopped) {
                   ContextImpl context = vertx.getContext();
                   try {
                      // Remove any context we have here (from the timer), otherwise it
                      // will screw things up when verticles are deployed
                     ContextImpl.setContext(null);
                     checkQuorumWhenAdded(nodeID, start);
                   } finally {
                     ContextImpl.setContext(context);
                   }
                 }
                 return null;
               },
               null);
         });
   }
 }
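
The pattern here — re-arm a short timer until either the condition holds or a deadline passes — can be reproduced outside Vert.x with a plain ScheduledExecutorService. A minimal sketch, assuming the same 200 ms retry interval and 10 s deadline (RetryUntilDeadline and its method names are hypothetical):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class RetryUntilDeadline {
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();

  // Re-check `condition` every 200 ms; give up after 10 s, as in checkQuorumWhenAdded.
  void retry(BooleanSupplier condition, Runnable onSuccess, long start) {
    if (condition.getAsBoolean()) {
      onSuccess.run();
    } else if (System.currentTimeMillis() - start > 10_000) {
      System.err.println("Timed out waiting for condition");
    } else {
      scheduler.schedule(() -> retry(condition, onSuccess, start), 200, TimeUnit.MILLISECONDS);
    }
  }
}
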
Example #2
 // Add the deployment to an internal list of deploymentIDs - these will be executed when a quorum
 // is attained
 private void addToHADeployList(
     final String verticleName,
     final DeploymentOptions deploymentOptions,
     final Handler<AsyncResult<String>> doneHandler) {
   toDeployOnQuorum.add(
       () -> {
         ContextImpl ctx = vertx.getContext();
         try {
           ContextImpl.setContext(null);
           deployVerticle(verticleName, deploymentOptions, doneHandler);
         } finally {
           ContextImpl.setContext(ctx);
         }
       });
 }
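
toDeployOnQuorum is evidently a collection of deferred Runnables that another part of the manager drains once a quorum is attained. A minimal sketch of that shape, assuming a ConcurrentLinkedQueue (class and method names here are hypothetical):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class DeferredDeployments {
  private final Queue<Runnable> toDeployOnQuorum = new ConcurrentLinkedQueue<>();

  // Called while there is no quorum: remember the work instead of running it.
  void addOnQuorum(Runnable deployment) {
    toDeployOnQuorum.add(deployment);
  }

  // Called once quorum is attained: run everything queued in the meantime.
  void deployQueuedDeployments() {
    Runnable task;
    while ((task = toDeployOnQuorum.poll()) != null) {
      task.run();
    }
  }
}
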
Example #3
 // Process the failover of a deployment
 private void processFailover(JsonObject failedVerticle) {
   if (failDuringFailover) {
     throw new VertxException("Oops!");
   }
   // This method must block until the failover is complete - i.e. the verticle is successfully
   // redeployed
   final String verticleName = failedVerticle.getString("verticle_name");
   final CountDownLatch latch = new CountDownLatch(1);
   final AtomicReference<Throwable> err = new AtomicReference<>();
   // Now deploy this verticle on this node
   ContextImpl ctx = vertx.getContext();
   if (ctx != null) {
      // We could be on the main thread, in which case we don't want to overwrite the TCCL
     ContextImpl.setContext(null);
   }
   JsonObject options = failedVerticle.getJsonObject("options");
   try {
     doDeployVerticle(
         verticleName,
         new DeploymentOptions(options),
         result -> {
           if (result.succeeded()) {
             log.info("Successfully redeployed verticle " + verticleName + " after failover");
           } else {
             log.error("Failed to redeploy verticle after failover", result.cause());
             err.set(result.cause());
           }
           latch.countDown();
           Throwable t = err.get();
           if (t != null) {
             throw new VertxException(t);
           }
         });
   } finally {
     if (ctx != null) {
       ContextImpl.setContext(ctx);
     }
   }
   try {
     if (!latch.await(120, TimeUnit.SECONDS)) {
       throw new VertxException("Timed out waiting for redeploy on failover");
     }
   } catch (InterruptedException e) {
     throw new IllegalStateException(e);
   }
 }
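
The CountDownLatch plus AtomicReference dance in this example is a general way to make a callback-style API block the calling thread. A self-contained sketch of just that part, with a hypothetical callback-taking asyncOp standing in for doDeployVerticle:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class BlockOnAsync {
  // Run an async operation and block the caller until it completes or times out.
  static void awaitCompletion(Consumer<Consumer<Throwable>> asyncOp) {
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<Throwable> err = new AtomicReference<>();
    asyncOp.accept(failure -> { // failure == null means success
      err.set(failure);
      latch.countDown();
    });
    try {
      if (!latch.await(120, TimeUnit.SECONDS)) {
        throw new IllegalStateException("Timed out waiting for completion");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException(e);
    }
    if (err.get() != null) {
      throw new IllegalStateException(err.get());
    }
  }
}
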
Example #4
 // Undeploy any HA deploymentIDs now there is no quorum
 private void undeployHADeployments() {
   for (String deploymentID : deploymentManager.deployments()) {
     Deployment dep = deploymentManager.getDeployment(deploymentID);
     if (dep != null) {
       if (dep.deploymentOptions().isHa()) {
         ContextImpl ctx = vertx.getContext();
         try {
           ContextImpl.setContext(null);
           deploymentManager.undeployVerticle(
               deploymentID,
               result -> {
                 if (result.succeeded()) {
                   log.info(
                       "Successfully undeployed HA deployment "
                           + deploymentID
                           + "-"
                           + dep.verticleIdentifier()
                           + " as there is no quorum");
                   addToHADeployList(
                       dep.verticleIdentifier(),
                       dep.deploymentOptions(),
                       result1 -> {
                         if (result1.succeeded()) {
                           log.info(
                               "Successfully redeployed verticle "
                                   + dep.verticleIdentifier()
                                   + " after quorum was re-attained");
                         } else {
                           log.error(
                               "Failed to redeploy verticle "
                                   + dep.verticleIdentifier()
                                   + " after quorum was re-attained",
                               result1.cause());
                         }
                       });
                 } else {
                   log.error("Failed to undeploy deployment on lost quorum", result.cause());
                 }
               });
         } finally {
           ContextImpl.setContext(ctx);
         }
       }
     }
   }
 }
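
All four HAManager examples wrap a call in the same save/clear/restore dance around the calling context. Reduced to a plain ThreadLocal, the shape looks like this (CURRENT and runWithoutContext are hypothetical; Vert.x manages its context differently):

public class ContextDance {
  private static final ThreadLocal<Object> CURRENT = new ThreadLocal<>();

  // Run `action` with no ambient context, then restore whatever was there
  // before, even if the action throws.
  static void runWithoutContext(Runnable action) {
    Object saved = CURRENT.get();
    try {
      CURRENT.remove();
      action.run();
    } finally {
      CURRENT.set(saved);
    }
  }
}
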
Example #5
  @SuppressWarnings("unchecked")
  private void buildDocument(
      VariableResolverImpl vr,
      DocWrapper doc,
      Map<String, Object> pk,
      DataConfig.Entity entity,
      boolean isRoot,
      ContextImpl parentCtx) {

    EntityProcessorWrapper entityProcessor = getEntityProcessor(entity);

    ContextImpl ctx =
        new ContextImpl(
            entity,
            vr,
            null,
            pk == null ? Context.FULL_DUMP : Context.DELTA_DUMP,
            session,
            parentCtx,
            this);
    entityProcessor.init(ctx);
    Context.CURRENT_CONTEXT.set(ctx);

    if (requestParameters.start > 0) {
      writer.log(SolrWriter.DISABLE_LOGGING, null, null);
    }

    if (verboseDebug) {
      writer.log(SolrWriter.START_ENTITY, entity.name, null);
    }

    int seenDocCount = 0;

    try {
      while (true) {
        if (stop.get()) return;
        if (importStatistics.docCount.get() > (requestParameters.start + requestParameters.rows))
          break;
        try {
          seenDocCount++;

          if (seenDocCount > requestParameters.start) {
            writer.log(SolrWriter.ENABLE_LOGGING, null, null);
          }

          if (verboseDebug && entity.isDocRoot) {
            writer.log(SolrWriter.START_DOC, entity.name, null);
          }
          if (doc == null && entity.isDocRoot) {
            doc = new DocWrapper();
            ctx.setDoc(doc);
            DataConfig.Entity e = entity;
            while (e.parentEntity != null) {
              addFields(
                  e.parentEntity, doc, (Map<String, Object>) vr.resolve(e.parentEntity.name), vr);
              e = e.parentEntity;
            }
          }

          Map<String, Object> arow = entityProcessor.nextRow();
          if (arow == null) {
            break;
          }

          // Support for start parameter in debug mode
          if (entity.isDocRoot) {
            if (seenDocCount <= requestParameters.start) continue;
            if (seenDocCount > requestParameters.start + requestParameters.rows) {
              LOG.info("Indexing stopped at docCount = " + importStatistics.docCount);
              break;
            }
          }

          if (verboseDebug) {
            writer.log(SolrWriter.ENTITY_OUT, entity.name, arow);
          }
          importStatistics.rowsCount.incrementAndGet();
          if (doc != null) {
            handleSpecialCommands(arow, doc);
            addFields(entity, doc, arow, vr);
          }
          if (entity.entities != null) {
            vr.addNamespace(entity.name, arow);
            for (DataConfig.Entity child : entity.entities) {
              buildDocument(vr, doc, child.isDocRoot ? pk : null, child, false, ctx);
            }
            vr.removeNamespace(entity.name);
          }
          // The child entities would have changed CURRENT_CONTEXT, so when they
          // are done, set it back to the old one.
          Context.CURRENT_CONTEXT.set(ctx);

          if (entity.isDocRoot) {
            if (stop.get()) return;
            if (!doc.isEmpty()) {
              boolean result = writer.upload(doc);
              doc = null;
              if (result) {
                importStatistics.docCount.incrementAndGet();
              } else {
                importStatistics.failedDocCount.incrementAndGet();
              }
            }
          }

        } catch (DataImportHandlerException e) {
          if (verboseDebug) {
            writer.log(SolrWriter.ENTITY_EXCEPTION, entity.name, e);
          }
          if (e.getErrCode() == DataImportHandlerException.SKIP_ROW) {
            continue;
          }
          if (isRoot) {
            if (e.getErrCode() == DataImportHandlerException.SKIP) {
              importStatistics.skipDocCount.getAndIncrement();
              doc = null;
            } else {
              LOG.error("Exception while processing: " + entity.name + " document : " + doc, e);
            }
            if (e.getErrCode() == DataImportHandlerException.SEVERE) throw e;
          } else throw e;
        } catch (Throwable t) {
          if (verboseDebug) {
            writer.log(SolrWriter.ENTITY_EXCEPTION, entity.name, t);
          }
          throw new DataImportHandlerException(DataImportHandlerException.SEVERE, t);
        } finally {
          if (verboseDebug) {
            writer.log(SolrWriter.ROW_END, entity.name, null);
            if (entity.isDocRoot) writer.log(SolrWriter.END_DOC, null, null);
            Context.CURRENT_CONTEXT.remove();
          }
        }
      }
    } finally {
      if (verboseDebug) {
        writer.log(SolrWriter.END_ENTITY, null, null);
      }
      entityProcessor.destroy();
    }
  }
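
The debug-mode start/rows handling above is plain paging arithmetic: rows whose running count is at most start are skipped, and the loop ends once the count exceeds start + rows. A tiny standalone sketch of the same window (names are hypothetical):

import java.util.ArrayList;
import java.util.List;

public class DebugWindow {
  // Keep only items with start < position <= start + rows, mirroring the
  // seenDocCount checks in buildDocument.
  static List<String> window(List<String> docs, int start, int rows) {
    List<String> out = new ArrayList<>();
    int seen = 0;
    for (String doc : docs) {
      seen++;
      if (seen <= start) continue; // before the window: skip
      if (seen > start + rows) break; // past the window: stop
      out.add(doc);
    }
    return out;
  }
}
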
Example #6
 private void runAThread(ThreadedEntityProcessorWrapper epw, EntityRow rows, String currProcess)
     throws Exception {
   currentEntityProcWrapper.set(epw);
   epw.threadedInit(context);
   initEntity();
   try {
     epw.init(rows);
     DocWrapper docWrapper = this.docWrapper;
     Context.CURRENT_CONTEXT.set(context);
     for (; ; ) {
       if (DocBuilder.this.stop.get()) break;
       try {
         Map<String, Object> arow = epw.nextRow();
         if (arow == null) {
           break;
         } else {
           importStatistics.rowsCount.incrementAndGet();
           if (docWrapper == null && entity.isDocRoot) {
             docWrapper = new DocWrapper();
             context.setDoc(docWrapper);
             DataConfig.Entity e = entity.parentEntity;
             for (EntityRow row = rows;
                 row != null && e != null;
                 row = row.tail, e = e.parentEntity) {
               addFields(e, docWrapper, row.row, epw.resolver);
             }
           }
           if (docWrapper != null) {
             handleSpecialCommands(arow, docWrapper);
             addFields(entity, docWrapper, arow, epw.resolver);
           }
           if (entity.entities != null) {
             EntityRow nextRow = new EntityRow(arow, rows, entity.name);
             for (DataConfig.Entity e : entity.entities) {
               epw.children.get(e).run(docWrapper, currProcess, nextRow);
             }
           }
         }
         if (entity.isDocRoot) {
           LOG.info("a row on docroot" + docWrapper);
           if (!docWrapper.isEmpty()) {
             LOG.info("adding a doc " + docWrapper);
             boolean result = writer.upload(docWrapper);
             docWrapper = null;
             if (result) {
               importStatistics.docCount.incrementAndGet();
             } else {
               importStatistics.failedDocCount.incrementAndGet();
             }
           }
         }
       } catch (DataImportHandlerException dihe) {
         exception = dihe;
         if (dihe.getErrCode() == SKIP_ROW || dihe.getErrCode() == SKIP) {
           importStatistics.skipDocCount.getAndIncrement();
            exception = null; // should not propagate up
           continue;
         }
         if (entity.isDocRoot) {
           if (dihe.getErrCode() == DataImportHandlerException.SKIP) {
             importStatistics.skipDocCount.getAndIncrement();
              exception = null; // should not propagate up
           } else {
              LOG.error(
                  "Exception while processing: " + entity.name + " document: " + docWrapper,
                  dihe);
           }
           if (dihe.getErrCode() == DataImportHandlerException.SEVERE) throw dihe;
         } else {
            // If this is not the docRoot, execution has happened on the same thread,
            // so propagate up; it will be handled at the docRoot
           entityEnded.set(true);
           throw dihe;
         }
         entityEnded.set(true);
       }
     }
   } finally {
     epw.destroy();
     currentEntityProcWrapper.remove();
     Context.CURRENT_CONTEXT.remove();
   }
 }
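
EntityRow is walked via row.tail in step with e.parentEntity, which suggests a singly linked stack of ancestor rows built as the entity recursion descends. A minimal sketch of such a structure (the field names mirror the example; the rest is an assumption, not Solr's actual class):

import java.util.Map;
import java.util.function.Consumer;

public class EntityRow {
  final Map<String, Object> row; // this entity's resolved row
  final EntityRow tail; // the parent entity's row, or null at the root
  final String name; // entity name, as in the example

  EntityRow(Map<String, Object> row, EntityRow tail, String name) {
    this.row = row;
    this.tail = tail;
    this.name = name;
  }

  // Walk from this row up through its ancestors, e.g. to copy parent fields into a doc.
  void forEachAncestor(Consumer<EntityRow> action) {
    for (EntityRow r = this; r != null; r = r.tail) {
      action.accept(r);
    }
  }
}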