@Override
  public Document[] parse(
      final AnchorURL location,
      final String mimeType,
      final String charset,
      final VocabularyScraper scraper,
      final int timezoneOffset,
      final InputStream source)
      throws Parser.Failure, InterruptedException {

    // check memory for parser
    if (!MemoryControl.request(200 * 1024 * 1024, false))
      throw new Parser.Failure(
          "Not enough Memory available for pdf parser: " + MemoryControl.available(), location);

    // create a pdf parser
    PDDocument pdfDoc;
    try {
      Thread.currentThread().setPriority(Thread.MIN_PRIORITY); // the pdfparser is a big pain
      // pdfDoc = PDDocument.load(source);
      final PDFParser pdfParser = new PDFParser(source);
      pdfParser.setTempDirectory(new File(System.getProperty("java.io.tmpdir")));
      pdfParser.parse();
      pdfDoc = pdfParser.getPDDocument();
    } catch (final IOException e) {
      throw new Parser.Failure(e.getMessage(), location);
    } finally {
      Thread.currentThread().setPriority(Thread.NORM_PRIORITY);
    }

    if (pdfDoc.isEncrypted()) {
      try {
        pdfDoc.openProtection(new StandardDecryptionMaterial(""));
      } catch (final BadSecurityHandlerException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
        }
        throw new Parser.Failure("Document is encrypted (1): " + e.getMessage(), location);
      } catch (final IOException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
        }
        throw new Parser.Failure("Document is encrypted (2): " + e.getMessage(), location);
      } catch (final CryptographyException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
        }
        throw new Parser.Failure("Document is encrypted (3): " + e.getMessage(), location);
      }
      final AccessPermission perm = pdfDoc.getCurrentAccessPermission();
      if (perm == null || !perm.canExtractContent()) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
        }
        throw new Parser.Failure("Document is encrypted and cannot be decrypted", location);
      }
    }

    // extracting some metadata
    PDDocumentInformation info = pdfDoc.getDocumentInformation();
    String docTitle = null,
        docSubject = null,
        docAuthor = null,
        docPublisher = null,
        docKeywordStr = null;
    Date docDate = new Date();
    if (info != null) {
      docTitle = info.getTitle();
      docSubject = info.getSubject();
      docAuthor = info.getAuthor();
      docPublisher = info.getProducer();
      if (docPublisher == null || docPublisher.isEmpty()) docPublisher = info.getCreator();
      docKeywordStr = info.getKeywords();
      try {
        if (info.getModificationDate() != null) docDate = info.getModificationDate().getTime();
      } catch (IOException e) {
      }
      // unused:
      // info.getTrapped()
    }
    info = null;

    if (docTitle == null || docTitle.isEmpty()) {
      docTitle = MultiProtocolURL.unescape(location.getFileName());
    }
    if (docTitle == null) {
      docTitle = docSubject;
    }
    String[] docKeywords = null;
    if (docKeywordStr != null) {
      docKeywords = docKeywordStr.split(" |,");
    }

    Collection<AnchorURL>[] pdflinks = null;
    Document[] result = null;
    try {
      // get the links
      pdflinks = extractPdfLinks(pdfDoc);
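      // extractPdfLinks returns one link collection per page of the document; the per-page
      // assert further below relies on this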

      // get the fulltext (either per document or for each page)
      final PDFTextStripper stripper = new PDFTextStripper(StandardCharsets.UTF_8.name());

      if (individualPages) {
        // this is a hack which stores the individual pages of the source pdf in individual index
        // documents; the new documents get a virtual link with a post argument page=X appended to
        // the original url

        // collect text
        int pagecount = pdfDoc.getNumberOfPages();
        String[] pages = new String[pagecount];
        for (int page = 1; page <= pagecount; page++) {
          stripper.setStartPage(page);
          stripper.setEndPage(page);
          pages[page - 1] = stripper.getText(pdfDoc);
          // System.out.println("PAGE " + page + ": " + pages[page - 1]);
        }

        // create individual documents for each page
        assert pages.length == pdflinks.length
            : "pages.length = " + pages.length + ", pdflinks.length = " + pdflinks.length;
        result = new Document[Math.min(pages.length, pdflinks.length)];
        String loc = location.toNormalform(true);
        for (int page = 0; page < result.length; page++) {
          result[page] =
              new Document(
                  new AnchorURL(
                      loc
                          + (loc.indexOf('?') > 0 ? '&' : '?')
                          + individualPagePropertyname
                          + '='
                          + (page + 1)), // these are virtual new pages; we cannot combine them
                                         // with '#' as that fragment would be removed when
                                         // computing the urlhash
                  mimeType,
                  StandardCharsets.UTF_8.name(),
                  this,
                  null,
                  docKeywords,
                  singleList(docTitle),
                  docAuthor,
                  docPublisher,
                  null,
                  null,
                  0.0f,
                  0.0f,
                  pages == null || page >= pages.length ? new byte[0] : UTF8.getBytes(pages[page]),
                  pdflinks == null || page >= pdflinks.length ? null : pdflinks[page],
                  null,
                  null,
                  false,
                  docDate);
        }
      } else {
        // collect the whole text at once
        final CharBuffer writer = new CharBuffer(odtParser.MAX_DOCSIZE);
        byte[] contentBytes = new byte[0];
        stripper.setEndPage(3); // get first 3 pages (always)
        writer.append(stripper.getText(pdfDoc));
        contentBytes = writer.getBytes(); // remember text in case of interrupting thread

        if (pdfDoc.getNumberOfPages() > 3) { // spare creating/starting thread if all pages read
          // continue with page 4; if the extraction thread below is terminated, the remaining
          // pages simply yield no text
          stripper.setStartPage(4);
          stripper.setEndPage(Integer.MAX_VALUE); // set to default
          // we start the pdf parsing in a separate thread to ensure that it can be terminated
          final PDDocument pdfDocC = pdfDoc;
          final Thread t =
              new Thread() {
                @Override
                public void run() {
                  Thread.currentThread().setName("pdfParser.getText:" + location);
                  try {
                    writer.append(stripper.getText(pdfDocC));
                  } catch (final Throwable e) {
                  }
                }
              };
          t.start();
          t.join(3000); // pdfbox likes to forget to terminate ... (quite often)
          if (t.isAlive()) t.interrupt();
        }
        contentBytes = writer.getBytes(); // get final text before closing writer

        Collection<AnchorURL> pdflinksCombined = new HashSet<AnchorURL>();
        for (Collection<AnchorURL> pdflinksx : pdflinks)
          if (pdflinksx != null) pdflinksCombined.addAll(pdflinksx);
        result =
            new Document[] {
              new Document(
                  location,
                  mimeType,
                  StandardCharsets.UTF_8.name(),
                  this,
                  null,
                  docKeywords,
                  singleList(docTitle),
                  docAuthor,
                  docPublisher,
                  null,
                  null,
                  0.0f,
                  0.0f,
                  contentBytes,
                  pdflinksCombined,
                  null,
                  null,
                  false,
                  docDate)
            };
      }
    } catch (final Throwable e) {
      // extraction errors are swallowed here so that any text and links collected so far are
      // still returned; the pdf document is closed in the finally block below
      // throw new Parser.Failure(e.getMessage(), location);
    } finally {
      try {
        pdfDoc.close();
      } catch (final Throwable e) {
      }
    }

    // clear resources in pdfbox. they say that this is resolved, but it is not. see:
    // https://issues.apache.org/jira/browse/PDFBOX-313
    // https://issues.apache.org/jira/browse/PDFBOX-351
    // https://issues.apache.org/jira/browse/PDFBOX-441
    // pdfbox still generates an enormous number of object allocations and never releases them.
    // the following objects are stored statically and never flushed:
    // COSFloat, COSArray, COSInteger, COSObjectKey, COSObject, COSDictionary,
    // COSStream, COSString, COSName, COSDocument, COSInteger[], COSNull
    // the large number of these objects can easily be seen in Java VisualVM.
    // we try to force them out of memory with explicit clear calls and hope for the best.
    pdfDoc = null;
    clean_up_idiotic_PDFParser_font_cache_which_eats_up_tons_of_megabytes();

    return result;
  }
 @Override
 public Document[] parse(
     final DigestURL location,
     final String mimeType,
     final String charset,
     final VocabularyScraper scraper,
     final int timezoneOffset,
     final InputStream source)
     throws Parser.Failure, InterruptedException {
   byte[] b = null;
   try {
     b = FileUtils.read(source);
   } catch (final IOException e1) {
     throw new Parser.Failure(e1.toString(), location);
   }
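    // a torrent file is a bencoded dictionary (see BEP 3); the "info" entry holds the torrent
    // "name" and, for multi-file torrents, a "files" list whose entries carry a "path" list of
    // name components. only these fields and the optional top-level "comment" are used here.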
   final BDecoder bd = new BDecoder(b);
   final BObject bo = bd.parse();
   if (bo == null) throw new Parser.Failure("BDecoder.parse returned null", location);
   if (bo.getType() != BType.dictionary)
     throw new Parser.Failure("BDecoder object is not a dictionary", location);
   final Map<String, BObject> map = bo.getMap();
   final BObject commento = map.get("comment");
   final String comment = (commento == null) ? "" : UTF8.String(commento.getString());
   // Date creation = new Date(map.get("creation date").getInteger());
   final BObject infoo = map.get("info");
   final StringBuilder filenames = new StringBuilder(80);
   String title = "";
   if (infoo != null) {
     final Map<String, BObject> info = infoo.getMap();
     final BObject fileso = info.get("files");
     if (fileso != null) {
       final List<BObject> filelist = fileso.getList();
       for (final BObject fo : filelist) {
         final BObject patho = fo.getMap().get("path");
         if (patho != null) {
           final List<BObject> l = patho.getList(); // one file may have several names
           for (final BObject fl : l) {
             filenames.append(fl.toString()).append(" ");
           }
         }
       }
     }
     final BObject nameo = info.get("name");
     if (nameo != null) title = UTF8.String(nameo.getString());
   }
   if (title == null || title.isEmpty()) title = MultiProtocolURL.unescape(location.getFileName());
   return new Document[] {
     new Document(
         location,
         mimeType,
         charset,
         this,
         null,
         null,
         singleList(title), // title
         comment, // author
         location.getHost(),
         null,
         null,
         0.0d,
         0.0d,
         filenames.toString(),
         null,
         null,
         null,
         false,
         new Date())
   };
 }
  public static serverObjects respond(
      final RequestHeader header, final serverObjects post, final serverSwitch env) {

    // return variable that accumulates replacements
    final Switchboard sb = (Switchboard) env;

    // clean up all search events
    SearchEventCache.cleanupEvents(true);
    sb.index.clearCaches(); // every time the ranking is changed we need to remove old orderings

    // initial values for AJAX elements (without JavaScript)
    final serverObjects prop = new serverObjects();
    prop.put("rejected", 0);

    Segment segment = sb.index;
    Fulltext fulltext = segment.fulltext();
    String localSolr = "/solr/select?core=collection1&q=*:*&start=0&rows=3";
    String remoteSolr =
        env.getConfig(SwitchboardConstants.FEDERATED_SERVICE_SOLR_INDEXING_URL, localSolr);
    if (!remoteSolr.endsWith("/")) remoteSolr = remoteSolr + "/";
    prop.put(
        "urlpublictextSolrURL",
        fulltext.connectedLocalSolr()
            ? localSolr
            : remoteSolr + "collection1/select?&q=*:*&start=0&rows=3");
    prop.putNum("urlpublictextSize", fulltext.collectionSize());
    prop.putNum("urlpublictextSegmentCount", fulltext.getDefaultConnector().getSegmentCount());
    prop.put(
        "webgraphSolrURL",
        fulltext.connectedLocalSolr()
            ? localSolr.replace("collection1", "webgraph")
            : remoteSolr + "webgraph/select?&q=*:*&start=0&rows=3");
    prop.putNum("webgraphSize", fulltext.useWebgraph() ? fulltext.webgraphSize() : 0);
    prop.putNum(
        "webgraphSegmentCount",
        fulltext.useWebgraph() ? fulltext.getWebgraphConnector().getSegmentCount() : 0);
    prop.putNum("citationSize", segment.citationCount());
    prop.putNum("citationSegmentCount", segment.citationSegmentCount());
    prop.putNum("rwipublictextSize", segment.RWICount());
    prop.putNum("rwipublictextSegmentCount", segment.RWISegmentCount());

    prop.put("list", "0");
    prop.put("loaderSize", 0);
    prop.put("loaderMax", 0);
    prop.put("list-loader", 0);

    int coreCrawlJobSize = sb.crawlQueues.coreCrawlJobSize();
    int limitCrawlJobSize = sb.crawlQueues.limitCrawlJobSize();
    int remoteTriggeredCrawlJobSize = sb.crawlQueues.remoteTriggeredCrawlJobSize();
    int noloadCrawlJobSize = sb.crawlQueues.noloadCrawlJobSize();
    int allsize =
        coreCrawlJobSize + limitCrawlJobSize + remoteTriggeredCrawlJobSize + noloadCrawlJobSize;

    prop.put("localCrawlSize", coreCrawlJobSize);
    prop.put("localCrawlState", "");
    prop.put("limitCrawlSize", limitCrawlJobSize);
    prop.put("limitCrawlState", "");
    prop.put("remoteCrawlSize", remoteTriggeredCrawlJobSize);
    prop.put("remoteCrawlState", "");
    prop.put("noloadCrawlSize", noloadCrawlJobSize);
    prop.put("noloadCrawlState", "");
    prop.put("terminate-button", allsize == 0 ? 0 : 1);
    prop.put("list-remote", 0);
    prop.put("forwardToCrawlStart", "0");

    prop.put("info", "0");
    boolean debug = (post != null && post.containsKey("debug"));

    if (post != null) {
      String c = post.toString();
      if (c.length() < 1000) ConcurrentLog.info("Crawl Start", c);
    }

    if (post != null && post.containsKey("queues_terminate_all")) {
      // terminate all crawls at once and clear the crawl queues
      sb.crawlQueues.noticeURL.clear();
      for (final byte[] h : sb.crawler.getActive()) {
        CrawlProfile p = sb.crawler.getActive(h);
        // guard against a null profile before dereferencing it
        if (p == null || CrawlSwitchboard.DEFAULT_PROFILES.contains(p.name())) continue;
        sb.crawler.putPassive(h, p);
        sb.crawler.removeActive(h);
        sb.crawler.removePassive(h);
        try {
          sb.crawlQueues.noticeURL.removeByProfileHandle(p.handle(), 10000);
        } catch (SpaceExceededException e) {
        }
      }

      // clear stacks
      for (StackType stackType : StackType.values()) sb.crawlQueues.noticeURL.clear(stackType);
      try {
        sb.cleanProfiles();
      } catch (final InterruptedException e) {
        /* ignore this */
      }

      // remove pause
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
      prop.put("terminate-button", 0);
    }

    if (post != null && post.containsKey("continue")) {
      // continue queue
      final String queue = post.get("continue", "");
      if ("localcrawler".equals(queue)) {
        sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
      } else if ("remotecrawler".equals(queue)) {
        sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
        sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
      }
    }

    if (post != null && post.containsKey("pause")) {
      // pause queue
      final String queue = post.get("pause", "");
      if ("localcrawler".equals(queue)) {
        sb.pauseCrawlJob(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL,
            "user request in Crawler_p from " + header.refererHost());
      } else if ("remotecrawler".equals(queue)) {
        sb.pauseCrawlJob(
            SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL,
            "user request in Crawler_p from " + header.refererHost());
      }
    }
    String queuemessage =
        sb.getConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
    if (queuemessage.length() == 0) {
      prop.put("info-queue", 0);
    } else {
      prop.put("info-queue", 1);
      prop.putHTML("info-queue_message", "pause reason: " + queuemessage);
    }

    if (post != null && post.containsKey("terminate"))
      try {
        final String handle = post.get("handle", "");
        // termination of a crawl: shift the crawl from active to passive
        final CrawlProfile p = sb.crawler.getActive(handle.getBytes());
        if (p != null) sb.crawler.putPassive(handle.getBytes(), p);
        // delete all entries in the crawl queue that belong to the profile terminated here
        sb.crawler.removeActive(handle.getBytes());
        sb.crawler.removePassive(handle.getBytes());
        sb.crawlQueues.noticeURL.removeByProfileHandle(handle, 10000);
      } catch (final SpaceExceededException e) {
        ConcurrentLog.logException(e);
      }

    if (post != null && post.containsKey("crawlingstart")) {
      // init crawl
      if (sb.peers == null) {
        prop.put("info", "3");
      } else {

        // remove crawlingFileContent before we record the call
        String crawlingFileName = post.get("crawlingFile");
        final File crawlingFile;
        if (crawlingFileName == null || crawlingFileName.isEmpty()) {
          crawlingFile = null;
        } else {
          if (crawlingFileName.startsWith("file://"))
            crawlingFileName = crawlingFileName.substring(7);
          crawlingFile = new File(crawlingFileName);
        }
        if (crawlingFile != null && crawlingFile.exists()) {
          post.remove("crawlingFile$file");
        }

        // prepare some filters that are adjusted here if requested
        boolean storeHTCache = "on".equals(post.get("storeHTCache", "off"));
        String newcrawlingMustMatch = post.get("mustmatch", CrawlProfile.MATCH_ALL_STRING);
        String newcrawlingMustNotMatch = post.get("mustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        // avoid that all urls are filtered out if a bad value was submitted
        if (newcrawlingMustMatch.length() < 2)
          newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
        boolean fullDomain =
            "domain".equals(post.get("range", "wide")); // special property in simple crawl start
        boolean subPath =
            "subpath".equals(post.get("range", "wide")); // special property in simple crawl start

        final boolean restrictedcrawl =
            fullDomain || subPath || !CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch);
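        // a crawl counts as "restricted" when it is limited to the start domain, a subpath, or a
        // non-default mustmatch filter; only for restricted crawls is deleting previously indexed
        // documents (deleteold / delete-by-age) offered below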
        final boolean deleteage = restrictedcrawl && "age".equals(post.get("deleteold", "off"));
        Date deleteageDate = null;
        if (deleteage) {
          deleteageDate =
              timeParser(
                  true,
                  post.getInt("deleteIfOlderNumber", -1),
                  post.get("deleteIfOlderUnit", "year")); // year, month, day, hour
        }
        final boolean deleteold =
            (deleteage && deleteageDate != null)
                || (restrictedcrawl && post.getBoolean("deleteold"));

        final String sitemapURLStr = post.get("sitemapURL", "");
        String crawlingStart0 = post.get("crawlingURL", "").trim(); // the crawljob start url
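        // several start urls may be given at once, separated either by line breaks or by '|'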
        String[] rootURLs0 =
            crawlingStart0.indexOf('\n') > 0 || crawlingStart0.indexOf('\r') > 0
                ? crawlingStart0.split("[\\r\\n]+")
                : crawlingStart0.split(Pattern.quote("|"));
        Set<DigestURL> rootURLs = new HashSet<DigestURL>();
        String crawlName = "";
        if (crawlingFile == null)
          for (String crawlingStart : rootURLs0) {
            if (crawlingStart == null || crawlingStart.length() == 0) continue;
            // add the prefix http:// if necessary
            int pos = crawlingStart.indexOf("://", 0);
            if (pos == -1) {
              if (crawlingStart.startsWith("ftp")) crawlingStart = "ftp://" + crawlingStart;
              else crawlingStart = "http://" + crawlingStart;
            }
            try {
              DigestURL crawlingStartURL = new DigestURL(crawlingStart);
              rootURLs.add(crawlingStartURL);
              crawlName +=
                  ((crawlingStartURL.getHost() == null)
                          ? crawlingStartURL.toNormalform(true)
                          : crawlingStartURL.getHost())
                      + ',';
              if (crawlingStartURL.isFile() || crawlingStartURL.isSMB()) storeHTCache = false;

            } catch (final MalformedURLException e) {
              ConcurrentLog.logException(e);
            }
          }
        else {
          crawlName = crawlingFile.getName();
        }
        if (crawlName.endsWith(",")) crawlName = crawlName.substring(0, crawlName.length() - 1);
        if (crawlName.length() > 64) {
          crawlName =
              "crawl_for_"
                  + rootURLs.size()
                  + "_start_points_"
                  + Integer.toHexString(crawlName.hashCode());
          int p = crawlName.lastIndexOf(',');
          if (p >= 8) crawlName = crawlName.substring(0, p);
        }
        if (crawlName.length() == 0 && sitemapURLStr.length() > 0)
          crawlName = "sitemap loader for " + sitemapURLStr;
        // if a root url uses the file protocol, the site filter does not work; patch that here:
        if (fullDomain) {
          for (DigestURL u : rootURLs)
            if (u.isFile()) {
              fullDomain = false;
              subPath = true;
              break;
            }
        }

        // delete old robots entries
        for (DigestURL ru : rootURLs) {
          sb.robots.delete(ru);
          try {
            if (ru.getHost() != null) { // might be null for file://
              Cache.delete(RobotsTxt.robotsURL(RobotsTxt.getHostPort(ru)).hash());
            }
          } catch (IOException e) {
          }
        }
        // to be safe: clear all robots entries
        try {
          sb.robots.clear();
        } catch (IOException e) {
        }

        // set the crawl filter
        String ipMustMatch = post.get("ipMustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String ipMustNotMatch = post.get("ipMustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        if (ipMustMatch.length() < 2) ipMustMatch = CrawlProfile.MATCH_ALL_STRING;
        final String countryMustMatch =
            post.getBoolean("countryMustMatchSwitch") ? post.get("countryMustMatchList", "") : "";
        sb.setConfig("crawlingIPMustMatch", ipMustMatch);
        sb.setConfig("crawlingIPMustNotMatch", ipMustNotMatch);
        if (countryMustMatch.length() > 0)
          sb.setConfig("crawlingCountryMustMatch", countryMustMatch);

        String crawlerNoDepthLimitMatch =
            post.get("crawlingDepthExtension", CrawlProfile.MATCH_NEVER_STRING);
        final String indexUrlMustMatch = post.get("indexmustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String indexUrlMustNotMatch =
            post.get("indexmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        final String indexContentMustMatch =
            post.get("indexcontentmustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String indexContentMustNotMatch =
            post.get("indexcontentmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);

        final boolean crawlOrder = post.get("crawlOrder", "off").equals("on");
        env.setConfig("crawlOrder", crawlOrder);

        if (crawlOrder)
          crawlerNoDepthLimitMatch =
              CrawlProfile.MATCH_NEVER_STRING; // without limitation the crawl order does not work

        int newcrawlingdepth = post.getInt("crawlingDepth", 8);
        env.setConfig("crawlingDepth", Integer.toString(newcrawlingdepth));
        if ((crawlOrder) && (newcrawlingdepth > 8)) newcrawlingdepth = 8;

        // also catch all linked media documents without loading them
        boolean directDocByURL = "on".equals(post.get("directDocByURL", "off"));
        env.setConfig("crawlingDirectDocByURL", directDocByURL);

        final String collection = post.get("collection", "user");
        env.setConfig("collection", collection);

        // recrawl
        final String recrawl = post.get("recrawl", "nodoubles"); // nodoubles, reload, scheduler
        Date crawlingIfOlder = null;
        if ("reload".equals(recrawl)) {
          crawlingIfOlder =
              timeParser(
                  true,
                  post.getInt("reloadIfOlderNumber", -1),
                  post.get("reloadIfOlderUnit", "year")); // year, month, day, hour
        }
        env.setConfig(
            "crawlingIfOlder",
            crawlingIfOlder == null ? Long.MAX_VALUE : crawlingIfOlder.getTime());

        // store this call as api call
        sb.tables.recordAPICall(
            post,
            "Crawler_p.html",
            WorkTables.TABLE_API_TYPE_CRAWLER,
            "crawl start for "
                + ((rootURLs.size() == 0)
                    ? post.get("crawlingFile", "")
                    : rootURLs.iterator().next().toNormalform(true)));

        final boolean crawlingDomMaxCheck = "on".equals(post.get("crawlingDomMaxCheck", "off"));
        final int crawlingDomMaxPages =
            (crawlingDomMaxCheck) ? post.getInt("crawlingDomMaxPages", -1) : -1;
        env.setConfig("crawlingDomMaxPages", Integer.toString(crawlingDomMaxPages));

        boolean followFrames = "on".equals(post.get("followFrames", "false"));
        env.setConfig("followFrames", followFrames);

        boolean obeyHtmlRobotsNoindex = "on".equals(post.get("obeyHtmlRobotsNoindex", "false"));
        env.setConfig("obeyHtmlRobotsNoindex", obeyHtmlRobotsNoindex);

        boolean obeyHtmlRobotsNofollow = "on".equals(post.get("obeyHtmlRobotsNofollow", "false"));
        env.setConfig("obeyHtmlRobotsNofollow", obeyHtmlRobotsNofollow);

        final boolean indexText = "on".equals(post.get("indexText", "false"));
        env.setConfig("indexText", indexText);

        final boolean indexMedia = "on".equals(post.get("indexMedia", "false"));
        env.setConfig("indexMedia", indexMedia);

        env.setConfig("storeHTCache", storeHTCache);

        String defaultAgentName =
            sb.isIntranetMode()
                ? ClientIdentification.yacyIntranetCrawlerAgentName
                : ClientIdentification.yacyInternetCrawlerAgentName;
        String agentName = post.get("agentName", defaultAgentName);
        ClientIdentification.Agent agent = ClientIdentification.getAgent(agentName);
        if (agent == null) agent = ClientIdentification.getAgent(defaultAgentName);

        CacheStrategy cachePolicy = CacheStrategy.parse(post.get("cachePolicy", "iffresh"));
        if (cachePolicy == null) cachePolicy = CacheStrategy.IFFRESH;

        String crawlingMode = post.get("crawlingMode", "url");

        if ("file".equals(crawlingMode) && post.containsKey("crawlingFile")) {
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          directDocByURL = false;
        }

        if ("sitemap".equals(crawlingMode)) {
          newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          newcrawlingdepth = 0;
          directDocByURL = false;
        }

        if ("sitelist".equals(crawlingMode)) {
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          Set<DigestURL> newRootURLs = new HashSet<DigestURL>();
          for (DigestURL sitelistURL : rootURLs) {
            // download document
            Document scraper;
            try {
              scraper =
                  sb.loader.loadDocument(
                      sitelistURL, CacheStrategy.IFFRESH, BlacklistType.CRAWLER, agent);
              // get links and generate filter
              for (DigestURL u : scraper.getHyperlinks().keySet()) {
                newRootURLs.add(u);
              }
            } catch (final IOException e) {
              ConcurrentLog.logException(e);
            }
          }
          rootURLs = newRootURLs;
          crawlingMode = "url";
          // prevent that a restriction is applied to the original urls
          if ((fullDomain || subPath) && newcrawlingdepth > 0)
            newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
        }

        // delete all error urls for that domain
        // and all urls for that host from the crawl queue
        Set<String> hosthashes = new HashSet<String>();
        boolean anysmbftporpdf = false;
        for (DigestURL u : rootURLs) {
          sb.index.fulltext().remove(u.hash());
          hosthashes.add(u.hosthash());
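          // note: "smb.ftp".indexOf(protocol) >= 0 is a shorthand that matches exactly the
          // "smb" and "ftp" protocols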
          if ("smb.ftp".indexOf(u.getProtocol()) >= 0
              || "pdf".equals(MultiProtocolURL.getFileExtension(u.getFileName())))
            anysmbftporpdf = true;
        }
        sb.crawlQueues.removeHosts(hosthashes);
        sb.index.fulltext().commit(true);
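        // the crawlingQ flag below (presumably: accept urls with a query part) is forced on for
        // smb/ftp/pdf start points and for sitemap crawls, otherwise taken from the form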

        boolean crawlingQ =
            anysmbftporpdf
                || "on".equals(post.get("crawlingQ", "off"))
                || "sitemap".equals(crawlingMode);
        env.setConfig("crawlingQ", crawlingQ);

        // compute mustmatch filter according to rootURLs
        if ((fullDomain || subPath) && newcrawlingdepth > 0) {
          String siteFilter = ".*";
          if (fullDomain) {
            siteFilter = CrawlProfile.siteFilter(rootURLs);
            if (deleteold) {
              sb.index.fulltext().deleteStaleDomainHashes(hosthashes, deleteageDate);
            }
          } else if (subPath) {
            siteFilter = CrawlProfile.subpathFilter(rootURLs);
            if (deleteold) {
              for (DigestURL u : rootURLs) {
                String basepath = u.toNormalform(true);
                if (!basepath.endsWith("/")) {
                  int p = basepath.lastIndexOf("/");
                  if (p > 0) basepath = basepath.substring(0, p + 1);
                }
                int count = sb.index.fulltext().remove(basepath, deleteageDate);
                if (count > 0)
                  ConcurrentLog.info(
                      "Crawler_p", "deleted " + count + " documents for host " + u.getHost());
              }
            }
          }
          if (CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch)) {
            newcrawlingMustMatch = siteFilter;
          } else if (!CrawlProfile.MATCH_ALL_STRING.equals(siteFilter)) {
            // combine both
            newcrawlingMustMatch = "(" + newcrawlingMustMatch + ")|(" + siteFilter + ")";
          }
        }

        // check if the crawl filter works correctly
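        // note: the assert below only fires when the JVM runs with assertions enabled (-ea);
        // an invalid regular expression is reported to the user via info=4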
        try {
          Pattern mmp = Pattern.compile(newcrawlingMustMatch);
          for (DigestURL u : rootURLs) {
            assert mmp.matcher(u.toNormalform(true)).matches()
                : "pattern " + mmp.toString() + " does not match url " + u.toNormalform(true);
          }
        } catch (final PatternSyntaxException e) {
          prop.put("info", "4"); // crawlfilter does not match url
          prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
          prop.putHTML("info_error", e.getMessage());
        }

        boolean hasCrawlstartDataOK = !crawlName.isEmpty();
        if (hasCrawlstartDataOK) {
          // check crawlurl was given in sitecrawl
          if ("url".equals(crawlingMode) && rootURLs.size() == 0) hasCrawlstartDataOK = false;
        }

        String snapshotsMaxDepthString = post.get("snapshotsMaxDepth", "-1");
        int snapshotsMaxDepth = Integer.parseInt(snapshotsMaxDepthString);
        boolean snapshotsLoadImage = post.getBoolean("snapshotsLoadImage");
        boolean snapshotsReplaceOld = post.getBoolean("snapshotsReplaceOld");
        String snapshotsMustnotmatch = post.get("snapshotsMustnotmatch", "");

        // get vocabulary scraper info
        // key = vocabulary_name, value = properties with key = type (i.e. 'class') and
        // value = keyword in context
        JSONObject vocabulary_scraper = new JSONObject();
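        // the form encodes vocabulary settings as fields named "vocabulary_<name>_class";
        // the substring(11, length - 6) below strips the "vocabulary_" prefix and the
        // "_class" suffix to recover <name>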
        for (String key : post.keySet()) {
          if (key.startsWith("vocabulary_")) {
            if (key.endsWith("_class")) {
              String vocabulary = key.substring(11, key.length() - 6);
              String value = post.get(key);
              if (value != null && value.length() > 0) {
                JSONObject props;
                try {
                  props = vocabulary_scraper.getJSONObject(vocabulary);
                } catch (JSONException e) {
                  props = new JSONObject();
                  vocabulary_scraper.put(vocabulary, props);
                }
                props.put("class", value);
              }
            }
          }
        }

        int timezoneOffset = post.getInt("timezoneOffset", 0);

        // in case that we crawl from a file, load that file and re-compute mustmatch pattern
        List<AnchorURL> hyperlinks_from_file = null;
        if ("file".equals(crawlingMode)
            && post.containsKey("crawlingFile")
            && crawlingFile != null) {
          final String crawlingFileContent = post.get("crawlingFile$file", "");
          try {
            // check if the crawl filter works correctly
            final ContentScraper scraper =
                new ContentScraper(
                    new DigestURL(crawlingFile), 10000000, new VocabularyScraper(), timezoneOffset);
            final Writer writer = new TransformerWriter(null, null, scraper, null, false);
            if (crawlingFile != null && crawlingFile.exists()) {
              FileUtils.copy(new FileInputStream(crawlingFile), writer);
            } else {
              FileUtils.copy(crawlingFileContent, writer);
            }
            writer.close();

            // get links and generate filter
            hyperlinks_from_file = scraper.getAnchors();
            if (newcrawlingdepth > 0) {
              if (fullDomain) {
                newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
              } else if (subPath) {
                newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
              }
            }
          } catch (final Exception e) {
            // something went wrong
            prop.put("info", "7"); // Error with file
            prop.putHTML("info_crawlingStart", crawlingFileName);
            prop.putHTML("info_error", e.getMessage());
            ConcurrentLog.logException(e);
          }
          sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        }

        // prepare a new crawling profile
        final CrawlProfile profile;
        byte[] handle;
        if (hasCrawlstartDataOK) {
          profile =
              new CrawlProfile(
                  crawlName,
                  newcrawlingMustMatch,
                  newcrawlingMustNotMatch,
                  ipMustMatch,
                  ipMustNotMatch,
                  countryMustMatch,
                  crawlerNoDepthLimitMatch,
                  indexUrlMustMatch,
                  indexUrlMustNotMatch,
                  indexContentMustMatch,
                  indexContentMustNotMatch,
                  newcrawlingdepth,
                  directDocByURL,
                  crawlingIfOlder,
                  crawlingDomMaxPages,
                  crawlingQ,
                  followFrames,
                  obeyHtmlRobotsNoindex,
                  obeyHtmlRobotsNofollow,
                  indexText,
                  indexMedia,
                  storeHTCache,
                  crawlOrder,
                  snapshotsMaxDepth,
                  snapshotsLoadImage,
                  snapshotsReplaceOld,
                  snapshotsMustnotmatch,
                  cachePolicy,
                  collection,
                  agentName,
                  new VocabularyScraper(vocabulary_scraper),
                  timezoneOffset);
          handle = ASCII.getBytes(profile.handle());

          // before we fire up a new crawl, we make sure that another crawl with the same name is
          // not running
          sb.crawler.removeActive(handle);
          sb.crawler.removePassive(handle);
          try {
            sb.crawlQueues.noticeURL.removeByProfileHandle(profile.handle(), 10000);
          } catch (final SpaceExceededException e1) {
          }
        } else {
          profile = null;
          handle = null;
        }

        // start the crawl
        if ("url".equals(crawlingMode)) {
          if (rootURLs.size() == 0) {
            prop.put("info", "5"); // Crawling failed
            prop.putHTML("info_crawlingURL", "(no url given)");
            prop.putHTML("info_reasonString", "you must submit at least one crawl url");
          } else {

            // stack requests
            sb.crawler.putActive(handle, profile);
            final Set<DigestURL> successurls = new HashSet<DigestURL>();
            final Map<DigestURL, String> failurls = new HashMap<DigestURL, String>();
            sb.stackURLs(rootURLs, profile, successurls, failurls);

            if (failurls.size() == 0) {
              // liftoff!
              prop.put("info", "8");
              prop.putHTML("info_crawlingURL", post.get("crawlingURL"));

              // generate a YaCyNews if the global flag was set
              if (!sb.isRobinsonMode() && crawlOrder) {
                final Map<String, String> m =
                    new HashMap<String, String>(profile); // must be cloned
                m.remove("specificDepth");
                m.remove("indexText");
                m.remove("indexMedia");
                m.remove("remoteIndexing");
                m.remove("xsstopw");
                m.remove("xpstopw");
                m.remove("xdstopw");
                m.remove("storeTXCache");
                m.remove("storeHTCache");
                m.remove("generalFilter");
                m.remove("specificFilter");
                m.put("intention", post.get("intention", "").replace(',', '/'));
                sb.peers.newsPool.publishMyNews(
                    sb.peers.mySeed(), NewsPool.CATEGORY_CRAWL_START, m);
              }
            } else {
              StringBuilder fr = new StringBuilder();
              for (Map.Entry<DigestURL, String> failure : failurls.entrySet()) {
                sb.crawlQueues.errorURL.push(
                    failure.getKey(),
                    0,
                    null,
                    FailCategory.FINAL_LOAD_CONTEXT,
                    failure.getValue(),
                    -1);
                fr.append(failure.getValue()).append('/');
              }

              prop.put("info", "5"); // Crawling failed
              prop.putHTML("info_crawlingURL", (post.get("crawlingURL")));
              prop.putHTML("info_reasonString", fr.toString());
            }
            if (successurls.size() > 0)
              sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          }
        } else if ("sitemap".equals(crawlingMode)) {
          try {
            // fix for relative paths which should not exist but are used anyway
            final DigestURL sitemapURL =
                sitemapURLStr.indexOf("//") > 0
                    ? new DigestURL(sitemapURLStr)
                    : new DigestURL(rootURLs.iterator().next(), sitemapURLStr);
            sb.crawler.putActive(handle, profile);
            final SitemapImporter importer = new SitemapImporter(sb, sitemapURL, profile);
            importer.start();
            sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          } catch (final Exception e) {
            // something went wrong
            prop.put("info", "6"); // Error with url
            prop.putHTML("info_crawlingStart", sitemapURLStr);
            prop.putHTML("info_error", e.getMessage());
            ConcurrentLog.logException(e);
          }
        } else if ("file".equals(crawlingMode)) {
          if (post.containsKey("crawlingFile")
              && crawlingFile != null
              && hyperlinks_from_file != null) {
            try {
              if (newcrawlingdepth > 0) {
                if (fullDomain) {
                  newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
                } else if (subPath) {
                  newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
                }
              }
              sb.crawler.putActive(handle, profile);
              sb.crawlStacker.enqueueEntriesAsynchronous(
                  sb.peers.mySeed().hash.getBytes(),
                  profile.handle(),
                  hyperlinks_from_file,
                  profile.timezoneOffset());
            } catch (final PatternSyntaxException e) {
              prop.put("info", "4"); // crawlfilter does not match url
              prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
              prop.putHTML("info_error", e.getMessage());
            } catch (final Exception e) {
              // something went wrong
              prop.put("info", "7"); // Error with file
              prop.putHTML("info_crawlingStart", crawlingFileName);
              prop.putHTML("info_error", e.getMessage());
              ConcurrentLog.logException(e);
            }
            sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          }
        }
      }
    }

    /*
    *  <input id="customPPM" name="customPPM" type="number" min="10" max="30000" style="width:46px" value="#[customPPMdefault]#" />PPM
       <input id="latencyFactor" name="latencyFactor" type="number" min="0.1" max="3.0" step="0.1" style="width:32px" value="#[latencyFactorDefault]#" />LF
       <input id="MaxSameHostInQueue" name="MaxSameHostInQueue" type="number" min="1" max="30" style="width:32px" value="#[MaxSameHostInQueueDefault]#" />MH
       <input type="submit" name="crawlingPerformance" value="set" />
       (<a href="/Crawler_p.html?crawlingPerformance=minimum">min</a>/<a href="/Crawler_p.html?crawlingPerformance=maximum">max</a>)
       </td>
    */
    if (post != null && post.containsKey("crawlingPerformance")) {
      final String crawlingPerformance = post.get("crawlingPerformance", "custom");
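      // PPM = crawler pages per minute; the busy-sleep value is the pause in milliseconds between
      // fetch cycles, so the currently configured PPM is 60000 / busySleep (a busySleep of 0 is
      // treated as the 30000 PPM maximum)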
      final long LCbusySleep1 =
          sb.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
      int wantedPPM = (LCbusySleep1 == 0) ? 30000 : (int) (60000L / LCbusySleep1);
      try {
        wantedPPM = post.getInt("customPPM", wantedPPM);
      } catch (final NumberFormatException e) {
      }
      if ("minimum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 10;
      if ("maximum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 30000;

      int wPPM = wantedPPM;
      if (wPPM <= 0) {
        wPPM = 1;
      }
      if (wPPM >= 30000) {
        wPPM = 30000;
      }
      final int newBusySleep = 60000 / wPPM; // for wantedPPM = 10: 6000; for wantedPPM = 1000: 60
      final float loadprereq =
          wantedPPM <= 10 ? 1.0f : wantedPPM <= 100 ? 2.0f : wantedPPM >= 1000 ? 8.0f : 3.0f;
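      // the load prerequisite above is scaled with the requested PPM; presumably this is the
      // maximum system load average under which the local crawl thread keeps running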

      BusyThread thread;

      thread = sb.getThread(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      if (thread != null) {
        sb.setConfig(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, thread.setBusySleep(newBusySleep));
        sb.setConfig(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_LOADPREREQ,
            thread.setLoadPreReqisite(loadprereq));
        thread.setIdleSleep(2000);
      }

      float latencyFactor = post.getFloat("latencyFactor", 0.5f);
      int MaxSameHostInQueue = post.getInt("MaxSameHostInQueue", 20);
      env.setConfig(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, latencyFactor);
      env.setConfig(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, MaxSameHostInQueue);
    }

    // performance settings
    final long LCbusySleep =
        env.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
    final int LCppm = (int) (60000L / Math.max(1, LCbusySleep));
    prop.put("customPPMdefault", Integer.toString(LCppm));
    prop.put(
        "latencyFactorDefault",
        env.getConfigFloat(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, 0.5f));
    prop.put(
        "MaxSameHostInQueueDefault",
        env.getConfigInt(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, 20));

    // generate crawl profile table
    int count = 0;
    boolean dark = true;
    final int domlistlength = (post == null) ? 160 : post.getInt("domlistlength", 160);
    CrawlProfile profile;
    // put active crawls into list
    String hosts = "";
    for (final byte[] h : sb.crawler.getActive()) {
      profile = sb.crawler.getActive(h);
      if (CrawlSwitchboard.DEFAULT_PROFILES.contains(profile.name())) continue;
      profile.putProfileEntry("crawlProfilesShow_list_", prop, true, dark, count, domlistlength);
      prop.put("crawlProfilesShow_list_" + count + "_debug", debug ? 1 : 0);
      if (debug) {
        RowHandleSet urlhashes = sb.crawler.getURLHashes(h);
        prop.put(
            "crawlProfilesShow_list_" + count + "_debug_count",
            urlhashes == null ? "unknown" : Integer.toString(urlhashes.size()));
      }
      hosts = hosts + "," + profile.name();
      dark = !dark;
      count++;
    }
    prop.put("crawlProfilesShow_debug", debug ? 1 : 0);
    prop.put("crawlProfilesShow_list", count);
    prop.put("crawlProfilesShow_count", count);
    prop.put("crawlProfilesShow", count == 0 ? 0 : 1);

    prop.put("crawlProfilesShow_linkstructure", 0);

    if (post != null) { // handle config button to display graphic
      if (post.get("hidewebstructuregraph") != null)
        sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, false);
      if (post.get("showwebstructuregraph") != null)
        sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true);
    }
    if (count > 0
        && sb.getConfigBool(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true)) {
      // collect the host names for 'wide' crawls which can be visualized
      boolean showLinkstructure = hosts.length() > 0 && !hosts.contains("file:");
      if (showLinkstructure) {
        StringBuilder q = new StringBuilder();
        hosts = hosts.substring(1);
        q.append(CollectionSchema.host_s.getSolrFieldName())
            .append(':')
            .append(hosts)
            .append(" OR ")
            .append(CollectionSchema.host_s.getSolrFieldName())
            .append(':')
            .append("www.")
            .append(hosts);
        try {
          prop.put(
              "crawlProfilesShow_linkstructure",
              count == 1
                      && sb.index.fulltext().getDefaultConnector().getCountByQuery(q.toString()) > 0
                  ? 1
                  : 2);
          prop.put("crawlProfilesShow_linkstructure_hosts", hosts);
        } catch (IOException e) {
        }
      }
    }

    // return rewrite properties
    return prop;
  }