Example #1
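  /**
   * Sets up a local request context, checks that the core has an update log, and delegates to
   * syncReplicas; returns false if this instance is closed or no update log is configured.
   */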
  public boolean sync(
      ZkController zkController,
      SolrCore core,
      ZkNodeProps leaderProps,
      boolean peerSyncOnlyWithActive) {
    if (SKIP_AUTO_RECOVERY) {
      return true;
    }
    boolean success;
    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
    SolrQueryResponse rsp = new SolrQueryResponse();
    SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
    try {
      if (isClosed) {
        log.warn("Closed, skipping sync up.");
        return false;
      }
      log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));

      if (core.getUpdateHandler().getUpdateLog() == null) {
        log.error("No UpdateLog found - cannot sync");
        return false;
      }

      success = syncReplicas(zkController, core, leaderProps, peerSyncOnlyWithActive);
    } finally {
      SolrRequestInfo.clearRequestInfo();
    }
    return success;
  }
Example #2
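  /**
   * Fetches the freshest copy of a document by id: first from the update log (realtime get),
   * otherwise from the newest realtime searcher; returns null if the id was deleted or was never
   * indexed.
   */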
  public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes)
      throws IOException {
    SolrInputDocument sid = null;
    RefCounted<SolrIndexSearcher> searcherHolder = null;
    try {
      SolrIndexSearcher searcher = null;
      UpdateLog ulog = core.getUpdateHandler().getUpdateLog();

      if (ulog != null) {
        Object o = ulog.lookup(idBytes);
        if (o != null) {
          // should currently be a List<Oper,Ver,Doc/Id>
          List entry = (List) o;
          assert entry.size() >= 3;
          int oper = (Integer) entry.get(0) & UpdateLog.OPERATION_MASK;
          switch (oper) {
            case UpdateLog.ADD:
              sid = (SolrInputDocument) entry.get(entry.size() - 1);
              break;
            case UpdateLog.DELETE:
              return null;
            default:
              throw new SolrException(
                  SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
          }
        }
      }

      if (sid == null) {
        // didn't find it in the update log, so it should be in the newest searcher opened
        searcherHolder = core.getRealtimeSearcher();
        searcher = searcherHolder.get();

        // SolrCore.verbose("RealTimeGet using searcher ", searcher);
        SchemaField idField = core.getLatestSchema().getUniqueKeyField();

        int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
        if (docid < 0) return null;
        StoredDocument luceneDocument = searcher.doc(docid);
        sid = toSolrInputDocument(luceneDocument, core.getLatestSchema());
      }
    } finally {
      if (searcherHolder != null) {
        searcherHolder.decref();
      }
    }

    return sid;
  }
Example #3
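 /**
  * Deletes all documents with a *:* delete-by-query, routed through the cloud path when running
  * in SolrCloud mode; IOExceptions are rethrown as LuxException.
  */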
 @Override
 public void deleteAll() {
   DeleteUpdateCommand cmd = new DeleteUpdateCommand(makeSolrQueryRequest());
   /*
   cmd.fromCommitted = true;
   cmd.fromPending = true;
   */
   cmd.query = "*:*";
   try {
     if (isCloud()) {
       deleteCloud(cmd);
     } else {
       core.getUpdateHandler().deleteByQuery(cmd);
     }
   } catch (IOException e) {
     throw new LuxException(e);
   }
 }
Example #4
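  /**
   * Verifies that the soft-commit tracker fires only after the configured document bound (14) is
   * exceeded: the first 14 adds stay invisible, the 15th triggers a commit, and a document added
   * after that commit is not yet visible.
   */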
  public void testMaxDocs() throws Exception {
    SolrCore core = h.getCore();

    NewSearcherListener trigger = new NewSearcherListener();

    DirectUpdateHandler2 updateHandler = (DirectUpdateHandler2) core.getUpdateHandler();
    CommitTracker tracker = updateHandler.softCommitTracker;
    tracker.setTimeUpperBound(-1);
    tracker.setDocsUpperBound(14);
    core.registerNewSearcherListener(trigger);

    UpdateRequestHandler handler = new UpdateRequestHandler();
    handler.init(null);

    MapSolrParams params = new MapSolrParams(new HashMap<String, String>());

    // Add documents
    SolrQueryResponse rsp = new SolrQueryResponse();
    SolrQueryRequestBase req = new SolrQueryRequestBase(core, params) {};
    for (int i = 0; i < 14; i++) {
      req.setContentStreams(
          toContentStreams(adoc("id", Integer.toString(i), "subject", "info"), null));
      handler.handleRequest(req, rsp);
    }
    // It should not be there right away
    assertQ("shouldn't find any", req("id:1"), "//result[@numFound=0]");
    assertEquals(0, tracker.getCommitCount());

    req.setContentStreams(toContentStreams(adoc("id", "14", "subject", "info"), null));
    handler.handleRequest(req, rsp);

    assertTrue(trigger.waitForNewSearcher(15000));

    req.setContentStreams(toContentStreams(adoc("id", "15", "subject", "info"), null));
    handler.handleRequest(req, rsp);

    // Now make sure we can find it
    assertQ("should find one", req("id:14"), "//result[@numFound=1]");
    assertEquals(1, tracker.getCommitCount());
    // But not the one added afterward
    assertQ("should not find one", req("id:15"), "//result[@numFound=0]");
    assertEquals(1, tracker.getCommitCount());
  }
Example #5
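 /**
  * Indexes the node with a checked-out XmlIndexer and adds the resulting document through the
  * core's update handler; the indexer is always returned to the pool, and checked exceptions are
  * rethrown as LuxException.
  */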
 private void writeLocal(SolrInputDocument solrDoc, NodeInfo node, String uri) {
   XmlIndexer indexer = null;
   try {
     indexer = xqueryComponent.getSolrIndexConfig().checkoutXmlIndexer();
     try {
       indexer.index(node, uri);
     } catch (XMLStreamException e) {
       throw new LuxException(e);
     }
     UpdateDocCommand cmd = new UpdateDocCommand(core, indexer.createLuceneDocument(), uri);
     cmd.solrDoc = solrDoc;
     core.getUpdateHandler().addDoc(cmd);
   } catch (IOException e) {
     throw new LuxException(e);
   } finally {
     if (indexer != null) {
       xqueryComponent.getSolrIndexConfig().returnXmlIndexer(indexer);
     }
   }
 }
Example #6
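  /**
   * Runs PeerSync against the core URLs of all known replicas of this shard; trivially succeeds
   * when the shard has no other replicas.
   */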
  private boolean syncWithReplicas(
      ZkController zkController,
      SolrCore core,
      ZkNodeProps props,
      String collection,
      String shardId,
      boolean peerSyncOnlyWithActive) {
    List<ZkCoreNodeProps> nodes =
        zkController
            .getZkStateReader()
            .getReplicaProps(
                collection,
                shardId,
                core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());

    if (nodes == null) {
      // I have no replicas
      return true;
    }

    List<String> syncWith = new ArrayList<>();
    for (ZkCoreNodeProps node : nodes) {
      syncWith.add(node.getCoreUrl());
    }

    // if we can't reach a replica for sync, we still consider the overall sync a success
    // TODO: as an assurance, we should still try and tell the sync nodes that we couldn't reach
    // to recover once more?
    PeerSync peerSync =
        new PeerSync(
            core,
            syncWith,
            core.getUpdateHandler().getUpdateLog().getNumRecordsToKeep(),
            true,
            true,
            peerSyncOnlyWithActive);
    return peerSync.sync();
  }
Example #7
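  /**
   * Builds a SolrInputDocument carrying the document URI and, when needed for the transaction log
   * or a cloud update, a serialized copy of the XML node, then dispatches to the cloud or local
   * write path.
   */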
  @Override
  public void write(NodeInfo node, String uri) {
    UpdateHandler updateHandler = core.getUpdateHandler();

    // Create a version of the document for saving to the transaction log,
    // or for cloud update via HTTP
    SolrInputDocument solrDoc = new SolrInputDocument();
    solrDoc.addField(uriFieldName, uri);
    if (isCloud()) {
      // TODO: write as binary, but we need to enable the binary update request writer for this
      // TinyBinary tinybin = new TinyBinary(((TinyNodeImpl)node).getTree());
      // solrDoc.addField(xmlFieldName, tinybin.getByteBuffer().array());
      Serializer serializer = xqueryComponent.solrIndexConfig.checkoutSerializer();
      try {
        String xmlString = serializer.serializeNodeToString(new XdmNode(node));
        solrDoc.addField(xmlFieldName, xmlString);
      } catch (SaxonApiException e) {
        throw new LuxException(e);
      } finally {
        xqueryComponent.solrIndexConfig.returnSerializer(serializer);
      }
      // TODO -- if we can determine this doc only gets added locally??
      // solrDoc.addField(xmlFieldName, node);
    } else if (updateHandler.getUpdateLog() != null) {
      if (node instanceof TinyNodeImpl) {
        TinyBinary tinybin = new TinyBinary(((TinyNodeImpl) node).getTree());
        solrDoc.addField(xmlFieldName, tinybin.getByteBuffer());
      } else {
        String xml = node.toString();
        solrDoc.addField(xmlFieldName, xml);
      }
    }
    if (isCloud()) {
      writeToCloud(solrDoc, uri);
    } else {
      writeLocal(solrDoc, node, uri);
    }
  }
Example #8
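  /**
   * Restores a backup into a new timestamped restore directory, copying each file from the backup
   * (or reusing an identical local copy), then switches the core to it; on failure the restore
   * directory is discarded and the core rolls back to the current index.
   */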
  private boolean doRestore() throws Exception {

    Path backupPath = Paths.get(backupLocation).resolve(backupName);
    SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
    String restoreIndexName = "restore." + dateFormat.format(new Date());
    String restoreIndexPath = core.getDataDir() + restoreIndexName;

    Directory restoreIndexDir = null;
    Directory indexDir = null;
    try (Directory backupDir = FSDirectory.open(backupPath)) {

      final Version version =
          IndexFetcher.checkOldestVersion(SegmentInfos.readLatestCommit(backupDir));

      restoreIndexDir =
          core.getDirectoryFactory()
              .get(
                  restoreIndexPath,
                  DirectoryFactory.DirContext.DEFAULT,
                  core.getSolrConfig().indexConfig.lockType);

      // Prefer local copy.
      indexDir =
          core.getDirectoryFactory()
              .get(
                  core.getIndexDir(),
                  DirectoryFactory.DirContext.DEFAULT,
                  core.getSolrConfig().indexConfig.lockType);

      // Copy each file into restoreIndexDir, preferring an identical local copy when available
      for (String filename : backupDir.listAll()) {
        checkInterrupted();
        log.info("Copying file {} to restore directory ", filename);
        try (IndexInput indexInput = backupDir.openInput(filename, IOContext.READONCE)) {
          Long checksum = null;
          try {
            checksum = CodecUtil.retrieveChecksum(indexInput);
          } catch (Exception e) {
            log.warn("Could not read checksum from index file: " + filename, e);
          }
          long length = indexInput.length();
          IndexFetcher.CompareResult compareResult =
              IndexFetcher.compareFile(indexDir, version, filename, length, checksum);
          if (!compareResult.equal
              || (!compareResult.checkSummed
                  && (filename.endsWith(".si")
                      || filename.endsWith(".liv")
                      || filename.startsWith("segments_")))) {
            restoreIndexDir.copyFrom(backupDir, filename, filename, IOContext.READONCE);
          } else {
            // prefer local copy
            restoreIndexDir.copyFrom(indexDir, filename, filename, IOContext.READONCE);
          }
        } catch (Exception e) {
          throw new SolrException(
              SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
        }
      }
      log.debug("Switching directories");
      IndexFetcher.modifyIndexProps(core, restoreIndexName);

      boolean success;
      try {
        core.getUpdateHandler().newIndexWriter(false);
        openNewSearcher();
        success = true;
        log.info("Successfully restored to the backup index");
      } catch (Exception e) {
        // Rollback to the old index directory. Delete the restore index directory and mark the
        // restore as failed.
        log.warn("Could not switch to restored index. Rolling back to the current index");
        Directory dir = null;
        try {
          dir =
              core.getDirectoryFactory()
                  .get(
                      core.getDataDir(),
                      DirectoryFactory.DirContext.META_DATA,
                      core.getSolrConfig().indexConfig.lockType);
          dir.deleteFile(IndexFetcher.INDEX_PROPERTIES);
        } finally {
          if (dir != null) {
            core.getDirectoryFactory().release(dir);
          }
        }

        core.getDirectoryFactory().doneWithDirectory(restoreIndexDir);
        core.getDirectoryFactory().remove(restoreIndexDir);
        core.getUpdateHandler().newIndexWriter(false);
        openNewSearcher();
        throw new SolrException(
            SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
      }
      if (success) {
        core.getDirectoryFactory().doneWithDirectory(indexDir);
        core.getDirectoryFactory().remove(indexDir);
      }

      return true;
    } finally {
      if (restoreIndexDir != null) {
        core.getDirectoryFactory().release(restoreIndexDir);
      }
      if (indexDir != null) {
        core.getDirectoryFactory().release(indexDir);
      }
    }
  }
Example #9
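  /**
   * Verifies commitWithin behaviour: adds and deletes become visible only after their commitWithin
   * window elapses, and rapid repeated requests within the window coalesce into a single commit.
   */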
  public void testCommitWithin() throws Exception {
    SolrCore core = h.getCore();

    NewSearcherListener trigger = new NewSearcherListener();
    core.registerNewSearcherListener(trigger);
    DirectUpdateHandler2 updater = (DirectUpdateHandler2) core.getUpdateHandler();
    CommitTracker tracker = updater.softCommitTracker;
    tracker.setTimeUpperBound(0);
    tracker.setDocsUpperBound(-1);

    UpdateRequestHandler handler = new UpdateRequestHandler();
    handler.init(null);

    MapSolrParams params = new MapSolrParams(new HashMap<String, String>());

    // Add a single document with commitWithin == 4 seconds
    SolrQueryResponse rsp = new SolrQueryResponse();
    SolrQueryRequestBase req = new SolrQueryRequestBase(core, params) {};
    req.setContentStreams(
        toContentStreams(
            adoc(4000, "id", "529", "field_t", "what's inside?", "subject", "info"), null));
    trigger.reset();
    handler.handleRequest(req, rsp);

    // Check it isn't in the index
    assertQ("shouldn't find any", req("id:529"), "//result[@numFound=0]");

    // Wait longer than the commitWithin time
    assertTrue("commitWithin failed to commit", trigger.waitForNewSearcher(30000));

    // Add one document without commitWithin
    req.setContentStreams(
        toContentStreams(adoc("id", "530", "field_t", "what's inside?", "subject", "info"), null));
    trigger.reset();
    handler.handleRequest(req, rsp);

    // Check it isn't in the index
    assertQ("shouldn't find any", req("id:530"), "//result[@numFound=0]");

    // Delete one document with commitWithin
    req.setContentStreams(toContentStreams(delI("529", "commitWithin", "1000"), null));
    trigger.reset();
    handler.handleRequest(req, rsp);

    // The delete hasn't been committed yet, so we should still find the document
    assertQ("should find one", req("id:529"), "//result[@numFound=1]");

    // Wait for the commit to happen
    assertTrue("commitWithin failed to commit", trigger.waitForNewSearcher(30000));

    // Now we shouldn't find it
    assertQ("should find none", req("id:529"), "//result[@numFound=0]");
    // ... but we should find the new one
    assertQ("should find one", req("id:530"), "//result[@numFound=1]");

    trigger.reset();

    // now make the call 10 times really fast and make sure it
    // only commits once
    req.setContentStreams(toContentStreams(adoc(2000, "id", "500"), null));
    for (int i = 0; i < 10; i++) {
      handler.handleRequest(req, rsp);
    }
    assertQ("should not be there yet", req("id:500"), "//result[@numFound=0]");

    // the same for the delete
    req.setContentStreams(toContentStreams(delI("530", "commitWithin", "1000"), null));
    for (int i = 0; i < 10; i++) {
      handler.handleRequest(req, rsp);
    }
    assertQ("should be there", req("id:530"), "//result[@numFound=1]");

    assertTrue("commitWithin failed to commit", trigger.waitForNewSearcher(30000));
    assertQ("should be there", req("id:500"), "//result[@numFound=1]");
    assertQ("should not be there", req("id:530"), "//result[@numFound=0]");

    assertEquals(3, tracker.getCommitCount());
  }
Example #10
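  /**
   * Verifies time-based soft auto-commit: documents become searchable only after the one-second
   * time bound passes, and a burst of adds within the window results in a single commit.
   */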
  public void testMaxTime() throws Exception {
    SolrCore core = h.getCore();

    NewSearcherListener trigger = new NewSearcherListener();
    core.registerNewSearcherListener(trigger);
    DirectUpdateHandler2 updater = (DirectUpdateHandler2) core.getUpdateHandler();
    CommitTracker tracker = updater.softCommitTracker;
    // Too low a bound can cause a slow host to commit before the test code checks that the
    // document isn't there yet, causing a failure at "shouldn't find any".
    tracker.setTimeUpperBound(1000);
    tracker.setDocsUpperBound(-1);
    // updater.commitCallbacks.add(trigger);

    UpdateRequestHandler handler = new UpdateRequestHandler();
    handler.init(null);

    MapSolrParams params = new MapSolrParams(new HashMap<String, String>());

    // Add a single document
    SolrQueryResponse rsp = new SolrQueryResponse();
    SolrQueryRequestBase req = new SolrQueryRequestBase(core, params) {};
    req.setContentStreams(
        toContentStreams(adoc("id", "529", "field_t", "what's inside?", "subject", "info"), null));
    trigger.reset();
    handler.handleRequest(req, rsp);

    // Check it isn't in the index yet
    assertQ("shouldn't find any", req("id:529"), "//result[@numFound=0]");

    // Wait longer than the autocommit time
    assertTrue(trigger.waitForNewSearcher(45000));
    trigger.reset();
    req.setContentStreams(
        toContentStreams(adoc("id", "530", "field_t", "what's inside?", "subject", "info"), null));
    handler.handleRequest(req, rsp);

    // Now make sure we can find it
    assertQ("should find one", req("id:529"), "//result[@numFound=1]");
    // But not this one
    assertQ("should find none", req("id:530"), "//result[@numFound=0]");

    // Delete the document
    assertU(delI("529"));
    assertQ("deleted, but should still be there", req("id:529"), "//result[@numFound=1]");
    // Wait longer than the autocommit time
    assertTrue(trigger.waitForNewSearcher(30000));
    trigger.reset();
    req.setContentStreams(
        toContentStreams(adoc("id", "550", "field_t", "what's inside?", "subject", "info"), null));
    handler.handleRequest(req, rsp);
    assertEquals(2, tracker.getCommitCount());
    assertQ("deleted and time has passed", req("id:529"), "//result[@numFound=0]");

    // now make the call 10 times really fast and make sure it
    // only commits once
    req.setContentStreams(toContentStreams(adoc("id", "500"), null));
    for (int i = 0; i < 10; i++) {
      handler.handleRequest(req, rsp);
    }
    assertQ("should not be there yet", req("id:500"), "//result[@numFound=0]");

    // Wait longer than the autocommit time
    assertTrue(trigger.waitForNewSearcher(45000));
    trigger.reset();

    req.setContentStreams(
        toContentStreams(adoc("id", "531", "field_t", "what's inside?", "subject", "info"), null));
    handler.handleRequest(req, rsp);
    assertEquals(3, tracker.getCommitCount());

    assertQ("now it should", req("id:500"), "//result[@numFound=1]");
  }
Example #11
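  /**
   * Realtime-get request handling: resolves each requested id from the update log when possible,
   * otherwise from the newest realtime searcher, applies any configured DocTransformer, and
   * returns either a single "doc" or a "response" doc list.
   */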
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();

    if (!params.getBool(COMPONENT_NAME, true)) {
      return;
    }

    String val = params.get("getVersions");
    if (val != null) {
      processGetVersions(rb);
      return;
    }

    val = params.get("getUpdates");
    if (val != null) {
      processGetUpdates(rb);
      return;
    }

    String[] id = params.getParams("id");
    String[] ids = params.getParams("ids");

    if (id == null && ids == null) {
      return;
    }

    String[] allIds = id == null ? new String[0] : id;

    if (ids != null) {
      List<String> lst = new ArrayList<String>();
      for (String s : allIds) {
        lst.add(s);
      }
      for (String idList : ids) {
        lst.addAll(StrUtils.splitSmart(idList, ",", true));
      }
      allIds = lst.toArray(new String[lst.size()]);
    }

    SolrCore core = req.getCore();
    SchemaField idField = core.getLatestSchema().getUniqueKeyField();
    FieldType fieldType = idField.getType();

    SolrDocumentList docList = new SolrDocumentList();
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();

    RefCounted<SolrIndexSearcher> searcherHolder = null;

    DocTransformer transformer = rsp.getReturnFields().getTransformer();
    if (transformer != null) {
      TransformContext context = new TransformContext();
      context.req = req;
      transformer.setContext(context);
    }
    try {
      SolrIndexSearcher searcher = null;

      BytesRef idBytes = new BytesRef();
      for (String idStr : allIds) {
        fieldType.readableToIndexed(idStr, idBytes);
        if (ulog != null) {
          Object o = ulog.lookup(idBytes);
          if (o != null) {
            // should currently be a List<Oper,Ver,Doc/Id>
            List entry = (List) o;
            assert entry.size() >= 3;
            int oper = (Integer) entry.get(0) & UpdateLog.OPERATION_MASK;
            switch (oper) {
              case UpdateLog.ADD:
                SolrDocument doc =
                    toSolrDoc(
                        (SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
                if (transformer != null) {
                  transformer.transform(doc, -1); // unknown docID
                }
                docList.add(doc);
                break;
              case UpdateLog.DELETE:
                break;
              default:
                throw new SolrException(
                    SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
            }
            continue;
          }
        }

        // didn't find it in the update log, so it should be in the newest searcher opened
        if (searcher == null) {
          searcherHolder = core.getRealtimeSearcher();
          searcher = searcherHolder.get();
        }

        // SolrCore.verbose("RealTimeGet using searcher ", searcher);

        int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
        if (docid < 0) continue;
        StoredDocument luceneDocument = searcher.doc(docid);
        SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
        if (transformer != null) {
          transformer.transform(doc, docid);
        }
        docList.add(doc);
      }

    } finally {
      if (searcherHolder != null) {
        searcherHolder.decref();
      }
    }

    // if the client specified a single id=foo, return it under "doc";
    // otherwise return a standard doclist under "response"

    if (ids == null && allIds.length <= 1) {
      // if the doc was not found, then use a value of null.
      rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
    } else {
      docList.setNumFound(docList.size());
      rsp.add("response", docList);
    }
  }