  public static ConcurrentHashMap<String, byte[]> loadMapB(final File f) {
    ConcurrentHashMap<String, String> m = loadMap(f);
    if (m == null) return null;
    ConcurrentHashMap<String, byte[]> mb = new ConcurrentHashMap<String, byte[]>();
    for (Map.Entry<String, String> e : m.entrySet())
      mb.put(e.getKey(), UTF8.getBytes(e.getValue()));
    return mb;
  }
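
  // Usage sketch (hypothetical file name and key): the byte[] values are simply the
  // UTF-8 encodings of the loaded strings, e.g.
  //   ConcurrentHashMap<String, byte[]> m = loadMapB(new File("DATA/SETTINGS/user.db"));
  //   if (m != null && m.containsKey("admin")) { byte[] v = m.get("admin"); }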
  @Override
  public Document[] parse(
      final AnchorURL location,
      final String mimeType,
      final String charset,
      final VocabularyScraper scraper,
      final int timezoneOffset,
      final InputStream source)
      throws Parser.Failure, InterruptedException {

    // check memory for parser
    if (!MemoryControl.request(200 * 1024 * 1024, false))
      throw new Parser.Failure(
          "Not enough Memory available for pdf parser: " + MemoryControl.available(), location);

    // create a pdf parser
    PDDocument pdfDoc;
    try {
      Thread.currentThread().setPriority(Thread.MIN_PRIORITY); // the pdfparser is a big pain
      // pdfDoc = PDDocument.load(source);
      final PDFParser pdfParser = new PDFParser(source);
      pdfParser.setTempDirectory(new File(System.getProperty("java.io.tmpdir")));
      pdfParser.parse();
      pdfDoc = pdfParser.getPDDocument();
    } catch (final IOException e) {
      throw new Parser.Failure(e.getMessage(), location);
    } finally {
      Thread.currentThread().setPriority(Thread.NORM_PRIORITY);
    }

    if (pdfDoc.isEncrypted()) {
      try {
        // try an empty user password: many PDFs set only an owner password (to restrict
        // printing/editing) and can still be opened for text extraction
        pdfDoc.openProtection(new StandardDecryptionMaterial(""));
      } catch (final BadSecurityHandlerException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
          // ignore close errors; the document is discarded anyway
        }
        throw new Parser.Failure("Document is encrypted (1): " + e.getMessage(), location);
      } catch (final IOException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
          // ignore close errors
        }
        throw new Parser.Failure("Document is encrypted (2): " + e.getMessage(), location);
      } catch (final CryptographyException e) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
          // ignore close errors
        }
        throw new Parser.Failure("Document is encrypted (3): " + e.getMessage(), location);
      }
      final AccessPermission perm = pdfDoc.getCurrentAccessPermission();
      if (perm == null || !perm.canExtractContent()) {
        try {
          pdfDoc.close();
        } catch (final IOException ee) {
          // ignore close errors
        }
        throw new Parser.Failure("Document is encrypted and cannot be decrypted", location);
      }
    }

    // extracting some metadata
    PDDocumentInformation info = pdfDoc.getDocumentInformation();
    String docTitle = null,
        docSubject = null,
        docAuthor = null,
        docPublisher = null,
        docKeywordStr = null;
    Date docDate = new Date();
    if (info != null) {
      docTitle = info.getTitle();
      docSubject = info.getSubject();
      docAuthor = info.getAuthor();
      docPublisher = info.getProducer();
      if (docPublisher == null || docPublisher.isEmpty()) docPublisher = info.getCreator();
      docKeywordStr = info.getKeywords();
      try {
        if (info.getModificationDate() != null) docDate = info.getModificationDate().getTime();
      } catch (IOException e) {
        // ignore; keep the current date as a fallback
      }
      // unused: info.getTrapped()
    }
    info = null;

    if (docTitle == null || docTitle.isEmpty()) {
      docTitle = MultiProtocolURL.unescape(location.getFileName());
    }
    if (docTitle == null || docTitle.isEmpty()) {
      docTitle = docSubject;
    }
    String[] docKeywords = null;
    if (docKeywordStr != null) {
      docKeywords = docKeywordStr.split(" |,");
    }
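    // note: the pattern " |," splits on a single space or a comma only, so a list like
    // "a, b" yields an empty token between ',' and ' '; a more tolerant pattern would
    // be docKeywordStr.split("[,\\s]+") (a sketch only, and a slight behavior change)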

    Collection<AnchorURL>[] pdflinks = null;
    Document[] result = null;
    try {
      // get the links
      pdflinks = extractPdfLinks(pdfDoc);

      // get the fulltext (either per document or for each page)
      final PDFTextStripper stripper = new PDFTextStripper(StandardCharsets.UTF_8.name());

      if (individualPages) {
        // This is a hack which stores individual pages of the source PDF in individual
        // index documents. Each new document gets a virtual link with a query argument
        // page=X appended to the original URL, for example:
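        // Example (hypothetical URLs, assuming individualPagePropertyname is "page"):
        //   http://example.net/doc.pdf     -> http://example.net/doc.pdf?page=1
        //   http://example.net/doc.pdf?x=1 -> http://example.net/doc.pdf?x=1&page=2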

        // collect text
        int pagecount = pdfDoc.getNumberOfPages();
        String[] pages = new String[pagecount];
        for (int page = 1; page <= pagecount; page++) {
          stripper.setStartPage(page);
          stripper.setEndPage(page);
          pages[page - 1] = stripper.getText(pdfDoc);
          // System.out.println("PAGE " + page + ": " + pages[page - 1]);
        }

        // create individual documents for each page
        assert pages.length == pdflinks.length
            : "pages.length = " + pages.length + ", pdflinks.length = " + pdflinks.length;
        result = new Document[Math.min(pages.length, pdflinks.length)];
        String loc = location.toNormalform(true);
        for (int page = 0; page < result.length; page++) {
          result[page] =
              new Document(
                  new AnchorURL(
                      loc
                          + (loc.indexOf('?') > 0 ? '&' : '?')
                          + individualPagePropertyname
                          + '='
                          + (page + 1)), // these are virtual new pages; we cannot combine
                                         // them with '#' as that would be removed when
                                         // computing the urlhash
                  mimeType,
                  StandardCharsets.UTF_8.name(),
                  this,
                  null,
                  docKeywords,
                  singleList(docTitle),
                  docAuthor,
                  docPublisher,
                  null,
                  null,
                  0.0f,
                  0.0f,
                  pages == null || page >= pages.length ? new byte[0] : UTF8.getBytes(pages[page]),
                  pdflinks == null || page >= pdflinks.length ? null : pdflinks[page],
                  null,
                  null,
                  false,
                  docDate);
        }
      } else {
        // collect the whole text at once
        final CharBuffer writer = new CharBuffer(odtParser.MAX_DOCSIZE);
        byte[] contentBytes = new byte[0];
        stripper.setEndPage(3); // always extract the first three pages synchronously
        writer.append(stripper.getText(pdfDoc));
        contentBytes = writer.getBytes(); // remember the text so far in case the extraction thread below must be interrupted

        if (pdfDoc.getNumberOfPages() > 3) { // spare creating/starting a thread if all pages were already read
          stripper.setStartPage(4); // continue with page 4; if the thread is terminated this yields no further text
          stripper.setEndPage(Integer.MAX_VALUE); // reset to the default (all remaining pages)
          // we start the pdf parsing in a separate thread to ensure that it can be terminated
          final PDDocument pdfDocC = pdfDoc;
          final Thread t =
              new Thread() {
                @Override
                public void run() {
                  Thread.currentThread().setName("pdfParser.getText:" + location);
                  try {
                    writer.append(stripper.getText(pdfDocC));
                  } catch (final Throwable e) {
                    // ignore; the partial text already in the writer is used instead
                  }
                }
              };
          t.start();
          t.join(3000); // pdfbox likes to forget to terminate ... (quite often)
          if (t.isAlive()) t.interrupt();
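          // note: interrupt() is best effort; if pdfbox never checks the interrupt flag
          // the thread may linger, but the text appended to the writer so far is kept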
        }
        contentBytes = writer.getBytes(); // get final text before closing writer

        Collection<AnchorURL> pdflinksCombined = new HashSet<AnchorURL>();
        for (Collection<AnchorURL> pdflinksx : pdflinks)
          if (pdflinksx != null) pdflinksCombined.addAll(pdflinksx);
        result =
            new Document[] {
              new Document(
                  location,
                  mimeType,
                  StandardCharsets.UTF_8.name(),
                  this,
                  null,
                  docKeywords,
                  singleList(docTitle),
                  docAuthor,
                  docPublisher,
                  null,
                  null,
                  0.0f,
                  0.0f,
                  contentBytes,
                  pdflinksCombined,
                  null,
                  null,
                  false,
                  docDate)
            };
      }
    } catch (final Throwable e) {
      // swallow parser errors so that a broken PDF does not abort the caller;
      // in that case result may remain null, which callers must handle
      // throw new Parser.Failure(e.getMessage(), location);
    } finally {
      try {
        pdfDoc.close();
      } catch (final Throwable e) {
      }
    }

    // clear resources in pdfbox. they say that this is resolved, but it is not. see:
    // https://issues.apache.org/jira/browse/PDFBOX-313
    // https://issues.apache.org/jira/browse/PDFBOX-351
    // https://issues.apache.org/jira/browse/PDFBOX-441
    // pdfbox still generates an enormous number of object allocations and does not
    // release them; the following objects are statically cached and never flushed:
    // COSFloat, COSArray, COSInteger, COSObjectKey, COSObject, COSDictionary,
    // COSStream, COSString, COSName, COSDocument, COSInteger[], COSNull
    // the large number of these objects can easily be seen in Java VisualVM
    // we try to force this garbage out of memory here with explicit clear calls and
    // hope for the best that the rubbish gets collected.
    pdfDoc = null;
    clean_up_idiotic_PDFParser_font_cache_which_eats_up_tons_of_megabytes();

    return result;
  }
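
  // A minimal sketch (not part of the original code) of the watchdog pattern used in
  // parse() above: run a blocking step in a worker thread, wait up to a deadline, and
  // interrupt the worker if it overruns. All names here are illustrative.
  private static void runWithDeadline(final Runnable work, final long deadlineMillis) {
    final Thread worker = new Thread(work, "pdfParser.watchdogWorker");
    worker.start();
    try {
      worker.join(deadlineMillis); // wait at most deadlineMillis for completion
    } catch (final InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the caller's interrupt status
    }
    if (worker.isAlive()) worker.interrupt(); // best effort; the worker may ignore it
  }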
  static final void sendRespondHeader(
      final HashMap<String, Object> conProp,
      final OutputStream respond,
      String httpVersion,
      final int httpStatusCode,
      String httpStatusText,
      ResponseHeader responseHeader)
      throws IOException {

    if (respond == null) throw new NullPointerException("The output stream must not be null.");
    if (conProp == null)
      throw new NullPointerException("The connection property structure must not be null.");
    if (httpVersion == null)
      httpVersion = (String) conProp.get(HeaderFramework.CONNECTION_PROP_HTTP_VER);
    if (httpVersion == null) httpVersion = HeaderFramework.HTTP_VERSION_1_1;
    if (responseHeader == null) responseHeader = new ResponseHeader(httpStatusCode);

    try {
      if ((httpStatusText == null) || (httpStatusText.length() == 0)) {
        if (HeaderFramework.http1_1.containsKey(Integer.toString(httpStatusCode)))
          // http1_1 includes http1_0 messages
          httpStatusText = HeaderFramework.http1_1.get(Integer.toString(httpStatusCode));
        else httpStatusText = "Unknown";
      }

      final StringBuilder header = new StringBuilder(560);

      // "HTTP/0.9" does not have a status line or header in the response
      if (!httpVersion.equalsIgnoreCase(HeaderFramework.HTTP_VERSION_0_9)) {
        // write status line
        header
            .append(httpVersion)
            .append(" ")
            .append(Integer.toString(httpStatusCode))
            .append(" ")
            .append(httpStatusText)
            .append("\r\n");

        // prepare header
        if (!responseHeader.containsKey(HeaderFramework.DATE))
          responseHeader.put(HeaderFramework.DATE, HeaderFramework.formatRFC1123(new Date()));
        if (!responseHeader.containsKey(HeaderFramework.CONTENT_TYPE))
          responseHeader.put(HeaderFramework.CONTENT_TYPE, "text/html; charset=UTF-8"); // fix this
        if (!responseHeader.containsKey(RequestHeader.CONNECTION)
            && conProp.containsKey(HeaderFramework.CONNECTION_PROP_PERSISTENT))
          responseHeader.put(
              RequestHeader.CONNECTION,
              (String) conProp.get(HeaderFramework.CONNECTION_PROP_PERSISTENT));
        if (!responseHeader.containsKey(RequestHeader.PROXY_CONNECTION)
            && conProp.containsKey(HeaderFramework.CONNECTION_PROP_PERSISTENT))
          responseHeader.put(
              RequestHeader.PROXY_CONNECTION,
              (String) conProp.get(HeaderFramework.CONNECTION_PROP_PERSISTENT));

        if (conProp.containsKey(HeaderFramework.CONNECTION_PROP_PERSISTENT)
            && conProp.get(HeaderFramework.CONNECTION_PROP_PERSISTENT).equals("keep-alive")
            && !responseHeader.containsKey(HeaderFramework.TRANSFER_ENCODING)
            && !responseHeader.containsKey(HeaderFramework.CONTENT_LENGTH))
          responseHeader.put(HeaderFramework.CONTENT_LENGTH, "0");

        // read custom headers
        final Iterator<ResponseHeader.Entry> it =
            responseHeader.getAdditionalHeaderProperties().iterator();
        ResponseHeader.Entry e;
        while (it.hasNext()) {
          // Append user properties to the main String
          // TODO: Should we check for user properties? What if they intersect properties
          // that are already in the header?
          e = it.next();
          header.append(e.getKey()).append(": ").append(e.getValue()).append("\r\n");
        }

        // write header
        final Iterator<String> i = responseHeader.keySet().iterator();
        String key;
        char tag;
        int count;
        while (i.hasNext()) {
          key = i.next();
          tag = key.charAt(0);
          if ((tag != '*') && (tag != '#')) {
            // '#' in a key is reserved for proxy attributes as artificial header values
            count = responseHeader.keyCount(key);
            for (int j = 0; j < count; j++) {
              header
                  .append(key)
                  .append(": ")
                  .append(responseHeader.getSingle(key, j))
                  .append("\r\n");
            }
          }
        }

        // end header
        header.append("\r\n");

        // sending headers to the client
        respond.write(UTF8.getBytes(header.toString()));

        // flush stream
        respond.flush();
      }

      conProp.put(HeaderFramework.CONNECTION_PROP_PROXY_RESPOND_HEADER, responseHeader);
      conProp.put(
          HeaderFramework.CONNECTION_PROP_PROXY_RESPOND_STATUS, Integer.toString(httpStatusCode));
    } catch (final Exception e) {
      // any interruption may be caused by a network error or because the user closed
      // the window during transmission; we simply pass it on as an IOException
      throw new IOException(e.getMessage());
    }
  }
  @Override
  public Request pop(boolean delay, CrawlSwitchboard cs, RobotsTxt robots) throws IOException {
    // returns a crawl entry from the stack and ensures minimum delta times
    long sleeptime = 0;
    Request crawlEntry = null;
    CrawlProfile profileEntry = null;
    synchronized (this) {
      mainloop:
      while (true) {
        Index depthStack = getLowestStack();
        if (depthStack == null) return null;
        Row.Entry rowEntry = null;
        while (depthStack.size() > 0) {
          rowEntry = depthStack.removeOne();
          if (rowEntry != null) break;
        }
        if (rowEntry == null) continue mainloop;
        crawlEntry = new Request(rowEntry);

        // check blacklist (again) because the user may have created blacklist entries after the
        // queue has been filled
        if (Switchboard.urlBlacklist.isListed(BlacklistType.CRAWLER, crawlEntry.url())) {
          if (log.isFine()) log.fine("URL '" + crawlEntry.url() + "' is in blacklist.");
          continue mainloop;
        }

        // at this point we must check whether the crawlEntry is still relevant, i.e.
        // whether its crawl profile still exists; if not, skip it and try the next entry
        // (callers must still handle a null return from this method and try again)
        profileEntry = cs.get(UTF8.getBytes(crawlEntry.profileHandle()));
        if (profileEntry == null) {
          if (log.isFine()) log.fine("no profile entry for handle " + crawlEntry.profileHandle());
          continue mainloop;
        }

        // depending on the caching policy we need sleep time to avoid DoS-like situations
        sleeptime = Latency.getDomainSleepTime(robots, profileEntry, crawlEntry.url());
        break;
      }
    }
    if (crawlEntry == null) return null;
    ClientIdentification.Agent agent =
        profileEntry == null
            ? ClientIdentification.yacyInternetCrawlerAgent
            : profileEntry.getAgent();
    long robotsTime = Latency.getRobotsTime(robots, crawlEntry.url(), agent);
    Latency.updateAfterSelection(crawlEntry.url(), profileEntry == null ? 0 : robotsTime);
    if (delay && sleeptime > 0) {
      // force busy waiting here
      // in the best case this should never happen if the balancer works properly
      // this is only a protection against the worst case, where the crawler could
      // behave in a DoS-like manner
      if (log.isInfo())
        log.info(
            "forcing crawl-delay of "
                + sleeptime
                + " milliseconds for "
                + crawlEntry.url().getHost()
                + ": "
                + Latency.waitingRemainingExplain(crawlEntry.url(), robots, agent));
      long loops = sleeptime / 1000;
      long rest = sleeptime % 1000;
      if (loops < 3) {
        rest = rest + 1000 * loops;
        loops = 0;
      }
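      // e.g. sleeptime = 4700 ms -> rest = 700, loops = 4 (wait 700 ms, then 4 x 1 s);
      //      sleeptime = 2500 ms -> rest = 2500, loops = 0 (one single wait, no loop)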
      Thread.currentThread()
          .setName(
              "Balancer waiting for "
                  + crawlEntry.url().getHost()
                  + ": "
                  + sleeptime
                  + " milliseconds");
      synchronized (this) {
        // must be synchronized here to avoid 'takeover' moves from other threads, which
        // would then idle for the same period; that would not be long enough
        if (rest > 0) {
          try {
            this.wait(rest);
          } catch (final InterruptedException e) {
            // ignore; treat interruption as the end of the wait
          }
        }
        for (int i = 0; i < loops; i++) {
          if (log.isInfo())
            log.info(
                "waiting for "
                    + crawlEntry.url().getHost()
                    + ": "
                    + (loops - i)
                    + " seconds remaining...");
          try {
            this.wait(1000);
          } catch (final InterruptedException e) {
            // ignore and continue with the next waiting loop
          }
        }
      }
      Latency.updateAfterSelection(crawlEntry.url(), robotsTime);
    }
    return crawlEntry;
  }
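
  // Caller sketch (hypothetical names): a fetch loop must handle the null result,
  // which signals empty stacks or a vanished crawl profile, and simply retry later.
  //   Request next = balancer.pop(true, crawlSwitchboard, robotsTxt);
  //   if (next != null) loader.load(next);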