/**
  * Moves the file {@code from} to the location {@code to}, replacing any existing
  * target file. A fast in-place rename is attempted first; when that fails (e.g.
  * the files are on different file systems), the content is copied manually and
  * the source is deleted afterwards.
  *
  * @param from source file; removed after a successful move
  * @param to target file; overwritten if it already exists
  * @throws IOException if the fall-back copy fails
  */
 private static void forceMove(final File from, final File to) throws IOException {
   // to.delete() returns false when the target does not exist; without the
   // exists() check that false would short-circuit && and skip the rename
   // attempt entirely, forcing an unnecessary copy
   if (!((to.delete() || !to.exists()) && from.renameTo(to))) {
     // rename not possible -- do it manually
     Files.copy(from, to);
     FileUtils.deletedelete(from);
   }
 }
Exemple #2
0
  /**
   * Loads an OAI-PMH response from the given source URL, extracts the resumption
   * token from it and stores the raw response into a file in the target directory.
   *
   * <p>NOTE(review): the {@code filePrefix} parameter is not used anywhere in this
   * constructor body — confirm whether that is intentional.
   *
   * @param loader dispatcher used to perform the network request
   * @param source URL of the OAI-PMH endpoint to load
   * @param targetDir directory where the downloaded response file is written
   * @param filePrefix file name prefix (currently unused, see note above)
   * @throws IOException if loading the response from the network fails
   */
  public OAIPMHLoader(
      final LoaderDispatcher loader,
      final DigestURI source,
      final File targetDir,
      final String filePrefix)
      throws IOException {
    this.source = source;

    // load the file from the net; CacheStrategy.NOCACHE forces a fresh download
    final Response response =
        loader.load(
            loader.request(source, false, true), CacheStrategy.NOCACHE, Integer.MAX_VALUE, true);
    final byte[] b = response.getContent();
    // the resumption token allows continuing a multi-part OAI-PMH harvest
    this.resumptionToken = new ResumptionToken(source, b);
    // System.out.println("*** ResumptionToken = " + this.resumptionToken.toString());
    final File f1 = new File(targetDir, OAIPMHImporter.filename4Source(source));
    final File f0 = new File(targetDir, f1.getName() + ".tmp");

    // transaction-safe writing: write to a .tmp file first, then rename.
    // NOTE(review): the result of renameTo() is ignored; a failed rename leaves
    // only the .tmp file behind — confirm whether that is acceptable here
    FileUtils.copy(b, f0);
    f0.renameTo(f1);

    /*
    SurrogateReader sr = new SurrogateReader(new ByteArrayInputStream(b), 100);
    Thread srt = new Thread(sr);
    srt.start();
    DCEntry dce;
    while ((dce = sr.take()) != DCEntry.poison) {
        System.out.println(dce.toString());
    }
    try {
        srt.join();
    } catch (InterruptedException e) {}
    */
  }
  /**
   * Finds illegal entries in a blacklist.
   *
   * @param blacklistToUse The blacklist file to be checked.
   * @param blEngine The blacklist engine which is used to check each entry.
   * @param allowRegex Set to true to allow regular expressions in the host part of a
   *     blacklist entry.
   * @return A map which contains all entries which have been identified as being
   *     illegal by the blacklist engine, with the entry as key and an error code as
   *     value.
   */
  private static Map<String, BlacklistError> getIllegalEntries(
      final String blacklistToUse, final Blacklist blEngine, final boolean allowRegex) {
    final Map<String, BlacklistError> badEntries = new HashMap<String, BlacklistError>();
    final Set<String> seenEntries = new HashSet<String>();

    final List<String> entries =
        FileUtils.getListArray(new File(ListManager.listsPath, blacklistToUse));
    final Map<String, String> checkProperties = new HashMap<String, String>();
    checkProperties.put("allowRegex", String.valueOf(allowRegex));

    for (final String raw : entries) {
      final String entry = raw.trim();

      // Set.add() returns false for an already-seen entry: report duplicates
      if (!seenEntries.add(entry)) {
        badEntries.put(entry, BlacklistError.DOUBLE_OCCURANCE);
        continue;
      }

      // let the engine validate the entry; positive codes indicate errors
      final BlacklistError err = blEngine.checkError(entry, checkProperties);
      if (err.getInt() > 0) {
        badEntries.put(entry, err);
      }
    }

    return badEntries;
  }
 /**
  * Write elements of an Array of Strings to a file (one element per line).
  *
  * @param listFile the file to write to
  * @param list the Array to write
  * @return returns <code>true</code> if successful, <code>false</code> otherwise
  */
 public static boolean writeList(final File listFile, final String[] list) {
   // presize the builder with a rough estimate of 40 characters per line
   final StringBuilder out = new StringBuilder(list.length * 40 + 1);
   for (final String element : list) {
     out.append(element).append(CR).append(LF);
   }
   // toString() is the idiomatic way to materialize a StringBuilder
   return FileUtils.writeList(listFile, out.toString()); // (File, String)
 }
  /**
   * Removes existing entries from a blacklist file and from the running blacklist
   * engine.
   *
   * @param blacklistToUse The blacklist file the entries are removed from.
   * @param supportedBlacklistTypes Types of blacklists which the entry is to be changed in.
   * @param entries Array of entries to be deleted.
   * @return Length of the list of entries to be removed.
   */
  private static int removeEntries(
      final String blacklistToUse, final String[] supportedBlacklistTypes, final String[] entries) {
    // load blacklist data from file
    final List<String> list =
        FileUtils.getListArray(new File(ListManager.listsPath, blacklistToUse));

    boolean listChanged = false;

    // delete the old entries from the file representation
    for (final String entry : entries) {
      String s = entry;

      if (list != null) {

        // get rid of escape characters which make it impossible to
        // properly use contains()
        if (s.contains("\\\\")) {
          s = s.replaceAll(Pattern.quote("\\\\"), Matcher.quoteReplacement("\\"));
        }

        // remove() already reports whether the element was present, so the
        // former separate contains() scan was redundant
        if (list.remove(s)) {
          listChanged = true;
        }
      }

      // remove the entry from the running blacklist engine
      for (final String supportedBlacklistType : supportedBlacklistTypes) {
        if (ListManager.listSetContains(supportedBlacklistType + ".BlackLists", blacklistToUse)) {
          // split the entry into host part and path part; a missing path matches everything
          final String host = (s.indexOf('/') == -1) ? s : s.substring(0, s.indexOf('/'));
          final String path = (s.indexOf('/') == -1) ? ".*" : s.substring(s.indexOf('/') + 1);
          try {
            Switchboard.urlBlacklist.remove(supportedBlacklistType, host, path);
          } catch (final RuntimeException e) {
            Log.logSevere("BLACKLIST-CLEANER", e.getMessage() + ": " + host + "/" + path);
          }
        }
      }
      // flush cached search events since blacklist changes can affect results
      SearchEventCache.cleanupEvents(true);
    }
    // persist the shortened list only when something was actually removed
    if (listChanged) {
      FileUtils.writeList(
          new File(ListManager.listsPath, blacklistToUse), list.toArray(new String[list.size()]));
    }
    return entries.length;
  }
Exemple #6
0
 /**
  * Discards the active crawl-profile cache and database file and re-creates a
  * fresh profile heap; falls back to {@code null} when the heap cannot be created.
  */
 private void resetProfiles() {
   this.profilesActiveCrawlsCache.clear();
   final File profileDbFile = new File(this.queuesRoot, DBFILE_ACTIVE_CRAWL_PROFILES);
   if (profileDbFile.exists()) {
     FileUtils.deletedelete(profileDbFile);
   }
   try {
     this.profilesActiveCrawls =
         new MapHeap(
             profileDbFile, Word.commonHashLength, NaturalOrder.naturalOrder, 1024 * 64, 500, ' ');
   } catch (final IOException e1) {
     Log.logException(e1);
     this.profilesActiveCrawls = null;
   }
   initActiveCrawlProfiles();
 }
Exemple #7
0
 /**
  * Buffers the complete input stream into memory and delegates to the
  * byte-array-based parse method.
  *
  * @param location URL of the archive being parsed
  * @param mimeType declared MIME type of the content
  * @param charset declared character set of the content
  * @param source stream with the raw archive bytes
  * @return the parsed documents
  * @throws Parser.Failure if reading the stream fails
  * @throws InterruptedException if parsing is interrupted
  */
 public Document[] parse(
     final MultiProtocolURI location,
     final String mimeType,
     final String charset,
     final InputStream source)
     throws Parser.Failure, InterruptedException {
   try {
     final ByteArrayOutputStream buffered = new ByteArrayOutputStream();
     FileUtils.copy(source, buffered);
     final byte[] content = buffered.toByteArray();
     return parse(location, mimeType, charset, new ByteArrayInputStream(content));
   } catch (final IOException e) {
     throw new Parser.Failure("error processing 7zip archive: " + e.getMessage(), location);
   }
 }
  /**
   * main - writes some data and checks the tables size (with time measuring)
   *
   * @param args first argument is the path of the records file to create
   */
  public static void main(final String[] args) {
    // open a file, add one entry and exit
    final File f = new File(args[0]);
    if (f.exists()) FileUtils.deletedelete(f);
    try {
      // record table with fixed 8-byte rows
      final Records t = new Records(f, 8);
      final byte[] b = new byte[8];
      t.add("01234567".getBytes(), 0);
      t.add("ABCDEFGH".getBytes(), 0);
      t.add("abcdefgh".getBytes(), 0);
      t.add("--------".getBytes(), 0);
      t.add("********".getBytes(), 0);
      // bulk-fill so that size() below has something to measure
      for (int i = 0; i < 1000; i++) t.add("++++++++".getBytes(), 0);
      t.add("=======0".getBytes(), 0);
      t.add("=======1".getBytes(), 0);
      t.add("=======2".getBytes(), 0);
      // cleanLast removes the last record, copying its content into b
      t.cleanLast(b, 0);
      System.out.println(UTF8.String(b));
      t.cleanLast(b, 0);
      // t.clean(2, b, 0);
      System.out.println(UTF8.String(b));
      t.get(1, b, 0);
      System.out.println(UTF8.String(b));
      // overwrite record 1 and read it back
      t.put(1, "AbCdEfGh".getBytes(), 0);
      t.get(1, b, 0);
      System.out.println(UTF8.String(b));
      t.get(3, b, 0);
      System.out.println(UTF8.String(b));
      t.get(4, b, 0);
      System.out.println(UTF8.String(b));
      System.out.println("size = " + t.size());
      // t.clean(t.size() - 2);
      t.cleanLast();
      // time 100000 size() calls
      final long start = System.currentTimeMillis();
      long c = 0;
      for (int i = 0; i < 100000; i++) {
        c = t.size();
      }
      // NOTE(review): with 100000 iterations, elapsed-millis/100 does not equal
      // nanoseconds per call (that would be elapsed * 1e6 / 1e5) -- verify the unit
      System.out.println(
          "size() needs " + ((System.currentTimeMillis() - start) / 100) + " nanoseconds");
      System.out.println("size = " + c);

      t.close();
    } catch (final IOException e) {
      ConcurrentLog.logException(e);
    }
  }
  /**
   * Builds the servlet response listing all blacklist files together with the
   * blacklist types each one is activated for, in template-property form.
   *
   * @param header request header (unused)
   * @param post request parameters (unused)
   * @param env server environment (unused)
   * @return the accumulated template properties
   */
  public static serverObjects respond(
      @SuppressWarnings("unused") final RequestHeader header,
      @SuppressWarnings("unused") final serverObjects post,
      @SuppressWarnings("unused") final serverSwitch env) {

    final serverObjects prop = new serverObjects();

    final Collection<String> dirlist =
        FileUtils.getDirListing(ListManager.listsPath, Blacklist.BLACKLIST_FILENAME_FILTER);

    int blacklistCount = 0;
    if (dirlist != null) {
      // index of the last entry, used to decide whether a trailing comma is needed;
      // computed inside the null check (the old code dereferenced dirlist before it)
      final int lastBlacklistCount = dirlist.size() - 1;
      for (final String element : dirlist) {
        prop.putXML(PREFIX_LISTS + blacklistCount + POSTFIX_NAME, element);

        prop.put(
            PREFIX_LISTS + blacklistCount + POSTFIX_SHARED,
            ListManager.listSetContains(BLACK_LISTS_SHARED, element));

        // one sub-entry per blacklist type, flagging whether this list is active for it
        int j = 0;
        for (final BlacklistType type : BLACKLIST_TYPE_VALUES) {
          prop.putXML(
              PREFIX_LISTS + blacklistCount + INFIX_TYPES + j + POSTFIX_NAME, type.toString());
          prop.put(
              PREFIX_LISTS + blacklistCount + INFIX_TYPES + j + POSTFIX_VALUE,
              ListManager.listSetContains(type + TYPES_EXT, element));

          prop.put(
              PREFIX_LISTS + blacklistCount + INFIX_TYPES + j + POSTFIX_COMMA, j < lastTypeIndex);

          j++;
        }
        prop.put(PREFIX_LISTS + blacklistCount + POSTFIX_TYPES, BLACKLIST_TYPE_VALUES.length);

        prop.put(
            PREFIX_LISTS + blacklistCount + POSTFIX_COMMA, blacklistCount < lastBlacklistCount);

        blacklistCount++;
      }
    }
    prop.put(LISTS, blacklistCount);

    return prop;
  }
Exemple #10
0
  /**
   * Imports bookmarks from an HTML stream: scrapes all anchors from the input and
   * stores each one as a bookmark with the given default tags.
   *
   * @param db bookmark database to store into
   * @param baseURL base URL used to resolve relative links
   * @param input HTML source containing the bookmark links
   * @param tag comma/space separated default tags applied to every imported bookmark
   * @param importPublic whether the imported bookmarks are marked public
   * @return number of bookmarks that were imported
   */
  private static int importFromBookmarks(
      BookmarksDB db,
      final DigestURI baseURL,
      final InputStreamReader input,
      final String tag,
      final boolean importPublic) {

    // this allows multiple default tags
    final Set<String> defaultTags = ListManager.string2set(tag);

    Map<MultiProtocolURI, Properties> anchors = new HashMap<MultiProtocolURI, Properties>();
    try {
      // scrape all anchors out of the HTML input
      final ContentScraper scraper = new ContentScraper(baseURL);
      final Writer writer = new TransformerWriter(null, null, scraper, null, false);
      FileUtils.copy(input, writer);
      writer.close();
      anchors = scraper.getAnchors();
    } catch (final IOException e) {
      Log.logWarning(
          "BOOKMARKS", "error during load of links: " + e.getClass() + " " + e.getMessage());
    }

    int importCount = 0;
    for (final Entry<MultiProtocolURI, Properties> anchor : anchors.entrySet()) {
      final MultiProtocolURI url = anchor.getKey();
      String title = anchor.getValue().getProperty("name", "");
      Log.logInfo("BOOKMARKS", "links.get(url)");
      if ("".equals(title)) { // cannot be displayed
        title = url.toString();
      }
      final Bookmark bm = db.new Bookmark(url.toString());
      bm.setProperty(Bookmark.BOOKMARK_TITLE, title);
      bm.setTags(defaultTags);
      bm.setPublic(importPublic);
      db.saveBookmark(bm);
      importCount++;
    }

    return importCount;
  }
Exemple #11
0
 /**
  * Loads crawl profiles from a DB file.
  *
  * <p>If opening the file fails (e.g. it is corrupt), the file is deleted and a
  * fresh, empty heap is created in its place.
  *
  * @param file DB file
  * @return crawl profile data, or {@code null} if the heap could not be created at all
  */
 private MapHeap loadFromDB(final File file) {
   MapHeap ret;
   try {
     ret =
         new MapHeap(file, Word.commonHashLength, NaturalOrder.naturalOrder, 1024 * 64, 500, ' ');
   } catch (final IOException e) {
     // log once (the old code logged the same exception twice), then retry
     // with a freshly created heap after removing the unreadable file
     Log.logException(e);
     FileUtils.deletedelete(file);
     try {
       ret =
           new MapHeap(
               file, Word.commonHashLength, NaturalOrder.naturalOrder, 1024 * 64, 500, ' ');
     } catch (final IOException e1) {
       Log.logException(e1);
       ret = null;
     }
   }
   return ret;
 }
Exemple #12
0
  /**
   * Decompresses a gzip stream into a temporary file and parses the uncompressed
   * content with the generic text parser.
   *
   * @param location URL of the gzip resource being parsed
   * @param mimeType declared MIME type of the content
   * @param charset declared character set of the content
   * @param source stream with the raw gzip bytes
   * @return the documents parsed from the uncompressed content
   * @throws Parser.Failure if decompression or parsing fails
   * @throws InterruptedException if parsing is interrupted
   */
  public Document[] parse(
      final MultiProtocolURI location,
      final String mimeType,
      final String charset,
      final InputStream source)
      throws Parser.Failure, InterruptedException {

    File tempFile = null;
    Document[] docs = null;
    try {
      int read = 0;
      final byte[] data = new byte[1024];

      final GZIPInputStream zippedContent = new GZIPInputStream(source);
      try {
        // creating a temp file to store the uncompressed data
        tempFile = File.createTempFile("gunzip", "tmp");
        tempFile.deleteOnExit();

        final FileOutputStream out = new FileOutputStream(tempFile);
        try {
          // reading gzip file and store it uncompressed
          while ((read = zippedContent.read(data, 0, 1024)) != -1) {
            out.write(data, 0, read);
          }
        } finally {
          // close in finally so the descriptor is not leaked when a read/write throws
          // (the old code only closed on the success path)
          out.close();
        }
      } finally {
        zippedContent.close();
      }

      // creating a new parser class to parse the unzipped content
      docs = TextParser.parseSource(location, null, null, tempFile, false);
    } catch (final Exception e) {
      if (e instanceof InterruptedException) throw (InterruptedException) e;
      if (e instanceof Parser.Failure) throw (Parser.Failure) e;

      throw new Parser.Failure(
          "Unexpected error while parsing gzip file. " + e.getMessage(), location);
    } finally {
      if (tempFile != null) FileUtils.deletedelete(tempFile);
    }
    return docs;
  }
 /**
  * Test main: parses the torrent file given as first argument and prints the
  * condensed words with their positions.
  *
  * @param args first argument is the path of a torrent file to parse
  */
 public static void main(String[] args) {
   try {
     byte[] b = FileUtils.read(new File(args[0]));
     torrentParser parser = new torrentParser();
     // parse under a dummy local URL; the actual content comes from the byte array
     Document[] d =
         parser.parse(
             new DigestURL("http://localhost/test.torrent"),
             null,
             StandardCharsets.UTF_8.name(),
             new VocabularyScraper(),
             0,
             new ByteArrayInputStream(b));
     // condense the first parsed document into its indexable words
     Condenser c = new Condenser(d[0], null, true, true, LibraryProvider.dymLib, false, false, 0);
     Map<String, Word> w = c.words();
     for (Map.Entry<String, Word> e : w.entrySet())
       System.out.println("Word: " + e.getKey() + " - " + e.getValue().posInText);
   } catch (final IOException e) {
     e.printStackTrace();
   } catch (final Parser.Failure e) {
     e.printStackTrace();
   } catch (final InterruptedException e) {
     e.printStackTrace();
   }
 }
  /**
   * test: parses the PDF file given as first argument, prints parsing statistics
   * and writes the extracted text to {@code parsedPdf.txt}.
   *
   * @param args first argument is the path of a PDF file to parse
   */
  public static void main(final String[] args) {
    if (args.length > 0 && args[0].length() > 0) {
      // file
      final File pdfFile = new File(args[0]);
      if (pdfFile.canRead()) {

        System.out.println(pdfFile.getAbsolutePath());
        final long startTime = System.currentTimeMillis();

        // parse, merging all documents the parser produces into one
        final AbstractParser parser = new pdfParser();
        Document document = null;
        try {
          document =
              Document.mergeDocuments(
                  null,
                  "application/pdf",
                  parser.parse(
                      null,
                      "application/pdf",
                      null,
                      new VocabularyScraper(),
                      0,
                      new FileInputStream(pdfFile)));
        } catch (final Parser.Failure e) {
          System.err.println("Cannot parse file " + pdfFile.getAbsolutePath());
          ConcurrentLog.logException(e);
        } catch (final InterruptedException e) {
          System.err.println("Interrupted while parsing!");
          ConcurrentLog.logException(e);
        } catch (final NoClassDefFoundError e) {
          // the PDF library may be missing from the classpath in test setups
          System.err.println("class not found: " + e.getMessage());
        } catch (final FileNotFoundException e) {
          ConcurrentLog.logException(e);
        }

        // statistics
        System.out.println("\ttime elapsed: " + (System.currentTimeMillis() - startTime) + " ms");

        // output: either a failure note or the text/anchor counts plus a text dump
        if (document == null) {
          System.out.println("\t!!!Parsing without result!!!");
        } else {
          System.out.println(
              "\tParsed text with "
                  + document.getTextLength()
                  + " chars of text and "
                  + document.getAnchors().size()
                  + " anchors");
          try {
            // write file
            FileUtils.copy(document.getTextStream(), new File("parsedPdf.txt"));
          } catch (final IOException e) {
            System.err.println("error saving parsed document");
            ConcurrentLog.logException(e);
          }
        }
      } else {
        System.err.println("Cannot read file " + pdfFile.getAbsolutePath());
      }
    } else {
      System.out.println("Please give a filename as first argument.");
    }
  }
 /**
  * Parses a BitTorrent metainfo (.torrent) file: bdecodes the content and builds a
  * single Document whose title is the torrent name, whose "author" field carries
  * the torrent comment, and whose text is the list of contained file names.
  *
  * @param location URL of the torrent resource
  * @param mimeType declared MIME type of the content
  * @param charset declared character set of the content
  * @param scraper vocabulary scraper (not used in the visible body)
  * @param timezoneOffset timezone offset (not used in the visible body)
  * @param source stream with the raw torrent bytes
  * @return a single-element Document array describing the torrent
  * @throws Parser.Failure if the stream cannot be read or is not a bencoded dictionary
  * @throws InterruptedException declared by the parser interface
  */
 @Override
 public Document[] parse(
     final DigestURL location,
     final String mimeType,
     final String charset,
     final VocabularyScraper scraper,
     final int timezoneOffset,
     final InputStream source)
     throws Parser.Failure, InterruptedException {
   byte[] b = null;
   try {
     b = FileUtils.read(source);
   } catch (final IOException e1) {
     throw new Parser.Failure(e1.toString(), location);
   }
   // a valid torrent file is a bencoded dictionary at the top level
   final BDecoder bd = new BDecoder(b);
   final BObject bo = bd.parse();
   if (bo == null) throw new Parser.Failure("BDecoder.parse returned null", location);
   if (bo.getType() != BType.dictionary)
     throw new Parser.Failure("BDecoder object is not a dictionary", location);
   final Map<String, BObject> map = bo.getMap();
   final BObject commento = map.get("comment");
   final String comment = (commento == null) ? "" : UTF8.String(commento.getString());
   // Date creation = new Date(map.get("creation date").getInteger());
   final BObject infoo = map.get("info");
   final StringBuilder filenames = new StringBuilder(80);
   String title = "";
   if (infoo != null) {
     // the "info" dictionary holds the file list and the torrent name
     final Map<String, BObject> info = infoo.getMap();
     final BObject fileso = info.get("files");
     if (fileso != null) {
       final List<BObject> filelist = fileso.getList();
       for (final BObject fo : filelist) {
         final BObject patho = fo.getMap().get("path");
         if (patho != null) {
           final List<BObject> l = patho.getList(); // one file may have several names
           for (final BObject fl : l) {
             filenames.append(fl.toString()).append(" ");
           }
         }
       }
     }
     final BObject nameo = info.get("name");
     if (nameo != null) title = UTF8.String(nameo.getString());
   }
   // fall back to the file name from the URL when the torrent carries no name
   if (title == null || title.isEmpty()) title = MultiProtocolURL.unescape(location.getFileName());
   return new Document[] {
     new Document(
         location,
         mimeType,
         charset,
         this,
         null,
         null,
         singleList(title), // title
         comment, // author
         location.getHost(),
         null,
         null,
         0.0d,
         0.0d,
         filenames.toString(),
         null,
         null,
         null,
         false,
         new Date())
   };
 }
  /**
   * Servlet entry point for the blacklist cleaner page: lists the available
   * blacklists, optionally deletes or alters selected entries, and reports all
   * entries the blacklist engine considers illegal.
   *
   * @param header request header (not read in the visible body)
   * @param post request parameters; may be {@code null} on a plain page load
   * @param env server environment, expected to be the {@code Switchboard}
   * @return the accumulated template properties
   */
  public static serverObjects respond(
      final RequestHeader header, final serverObjects post, final serverSwitch env) {
    final serverObjects prop = new serverObjects();

    // initialize the list manager
    ListManager.switchboard = (Switchboard) env;
    ListManager.listsPath =
        new File(env.getDataPath(), env.getConfig("listManager.listsPath", "DATA/LISTS"));
    String blacklistToUse = null;

    // get the list of supported blacklist types
    final String supportedBlacklistTypesStr = Blacklist.BLACKLIST_TYPES_STRING;
    final String[] supportedBlacklistTypes = supportedBlacklistTypesStr.split(",");

    prop.put(DISABLED + "checked", "1");

    if (post != null) {

      // regex support in the host part is an opt-in checkbox
      final boolean allowRegex =
          post.get("allowRegex", "off").equalsIgnoreCase("on") ? true : false;
      prop.put(DISABLED + "checked", (allowRegex) ? "1" : "0");

      // results=2 signals an unknown/invalid blacklist name to the template
      if (post.containsKey("listNames")) {
        blacklistToUse = post.get("listNames");
        if (blacklistToUse.length() == 0
            || !ListManager.listSetContains("listManager.listsPath", blacklistToUse)) {
          prop.put("results", "2");
        }
      }

      putBlacklists(
          prop,
          FileUtils.getDirListing(ListManager.listsPath, BLACKLIST_FILENAME_FILTER),
          blacklistToUse);

      if (blacklistToUse != null) {
        prop.put("results", "1");

        // apply the requested modification: delete or alter selected entries
        if (post.containsKey("delete")) {
          prop.put(RESULTS + "modified", "1");
          prop.put(
              RESULTS + "modified_delCount",
              removeEntries(
                  blacklistToUse, supportedBlacklistTypes, getKeysByPrefix(post, "select", true)));
        } else if (post.containsKey("alter")) {
          prop.put(RESULTS + "modified", "2");
          prop.put(
              RESULTS + "modified_alterCount",
              alterEntries(
                  blacklistToUse,
                  supportedBlacklistTypes,
                  getKeysByPrefix(post, "select", false),
                  getValuesByPrefix(post, "entry", false)));
        }

        // list illegal entries
        final Map<String, BlacklistError> illegalEntries =
            getIllegalEntries(blacklistToUse, Switchboard.urlBlacklist, allowRegex);
        prop.put(RESULTS + "blList", blacklistToUse);
        prop.put(RESULTS + "entries", illegalEntries.size());
        prop.putHTML(RESULTS + "blEngine", Switchboard.urlBlacklist.getEngineInfo());
        prop.put(RESULTS + "disabled", (illegalEntries.isEmpty()) ? "1" : "0");
        if (!illegalEntries.isEmpty()) {
          // one indexed template entry per illegal blacklist line, with its error code
          prop.put(RESULTS + DISABLED + "entries", illegalEntries.size());
          int i = 0;
          String key;
          for (final Entry<String, BlacklistError> entry : illegalEntries.entrySet()) {
            key = entry.getKey();
            prop.put(RESULTS + DISABLED + ENTRIES + i + "_error", entry.getValue().getLong());
            prop.putHTML(RESULTS + DISABLED + ENTRIES + i + "_entry", key);
            i++;
          }
        }
      }
    } else {
      // plain page load without parameters: just list the available blacklists
      prop.put("results", "0");
      putBlacklists(
          prop,
          FileUtils.getDirListing(ListManager.listsPath, BLACKLIST_FILENAME_FILTER),
          blacklistToUse);
    }

    return prop;
  }
Exemple #17
0
 /**
  * Reads the named list file from the lists directory and returns its content as a
  * single string.
  *
  * @param filename name of the list file inside {@code listsPath}
  * @param withcomments whether comment lines are kept in the result
  * @return the file content as one string
  */
 public static String getListString(final String filename, final boolean withcomments) {
   final File listFile = new File(listsPath, filename);
   return FileUtils.getListString(listFile, withcomments);
 }
Exemple #18
0
  public static serverObjects respond(
      final RequestHeader header, final serverObjects post, final serverSwitch env) {
    // return variable that accumulates replacements
    final serverObjects prop = new serverObjects();
    final Switchboard sb = (Switchboard) env;

    // set if this should be visible
    if (yacyBuildProperties.isPkgManager()) {
      prop.put("candeploy", "2");
      return prop;
    } else if (OS.canExecUnix || OS.isWindows) {
      // we can deploy a new system with (i.e.)
      // cd DATA/RELEASE;tar xfz $1;cp -Rf yacy/* ../../;rm -Rf yacy
      prop.put("candeploy", "1");
    } else {
      prop.put("candeploy", "0");
    }

    prop.put("candeploy_configCommit", "0");
    prop.put("candeploy_autoUpdate", "0");
    prop.put("candeploy_downloadsAvailable", "0");

    if (post != null) {
      // check if update is supposed to be installed and a release is defined
      if (post.containsKey("update") && !post.get("releaseinstall", "").isEmpty()) {
        prop.put("forwardToSteering", "1");
        prop.putHTML("forwardToSteering_release", post.get("releaseinstall", ""));
        prop.put("deploys", "1");
        prop.put("candeploy", "2"); // display nothing else
        return prop;
      }

      if (post.containsKey("downloadRelease")) {
        // download a release
        final String release = post.get("releasedownload", "");
        if (!release.isEmpty()) {
          try {
            yacyRelease versionToDownload = new yacyRelease(new DigestURI(release));

            // replace this version with version which contains public key
            final yacyRelease.DevAndMainVersions allReleases =
                yacyRelease.allReleases(false, false);
            final Set<yacyRelease> mostReleases =
                versionToDownload.isMainRelease() ? allReleases.main : allReleases.dev;
            for (final yacyRelease rel : mostReleases) {
              if (rel.equals(versionToDownload)) {
                versionToDownload = rel;
                break;
              }
            }
            versionToDownload.downloadRelease();
          } catch (final IOException e) {
            // TODO Auto-generated catch block
            Log.logException(e);
          }
        }
      }

      if (post.containsKey("checkRelease")) {
        yacyRelease.allReleases(true, false);
      }

      if (post.containsKey("deleteRelease")) {
        final String release = post.get("releaseinstall", "");
        if (!release.isEmpty()) {
          try {
            FileUtils.deletedelete(new File(sb.releasePath, release));
            FileUtils.deletedelete(new File(sb.releasePath, release + ".sig"));
          } catch (final NullPointerException e) {
            sb.getLog()
                .logSevere(
                    "AUTO-UPDATE: could not delete release " + release + ": " + e.getMessage());
          }
        }
      }

      if (post.containsKey("autoUpdate")) {
        final yacyRelease updateVersion = yacyRelease.rulebasedUpdateInfo(true);
        if (updateVersion == null) {
          prop.put("candeploy_autoUpdate", "2"); // no more recent release found
        } else {
          // there is a version that is more recent. Load it and re-start with it
          sb.getLog()
              .logInfo("AUTO-UPDATE: downloading more recent release " + updateVersion.getUrl());
          final File downloaded = updateVersion.downloadRelease();
          prop.putHTML("candeploy_autoUpdate_downloadedRelease", updateVersion.getName());
          final boolean devenvironment = new File(sb.getAppPath(), ".svn").exists();
          if (devenvironment) {
            sb.getLog()
                .logInfo("AUTO-UPDATE: omitting update because this is a development environment");
            prop.put("candeploy_autoUpdate", "3");
          } else if ((downloaded == null) || (!downloaded.exists()) || (downloaded.length() == 0)) {
            sb.getLog()
                .logInfo(
                    "AUTO-UPDATE: omitting update because download failed (file cannot be found, is too small or signature was bad)");
            prop.put("candeploy_autoUpdate", "4");
          } else {
            yacyRelease.deployRelease(downloaded);
            sb.terminate(10, "manual release update to " + downloaded.getName());
            sb.getLog().logInfo("AUTO-UPDATE: deploy and restart initiated");
            prop.put("candeploy_autoUpdate", "1");
          }
        }
      }

      if (post.containsKey("configSubmit")) {
        prop.put("candeploy_configCommit", "1");
        sb.setConfig(
            "update.process",
            ("manual".equals(post.get("updateMode", "manual"))) ? "manual" : "auto");
        sb.setConfig("update.cycle", Math.max(12, post.getLong("cycle", 168)));
        sb.setConfig("update.blacklist", post.get("blacklist", ""));
        sb.setConfig(
            "update.concept", ("any".equals(post.get("releaseType", "any"))) ? "any" : "main");
        sb.setConfig(
            "update.onlySignedFiles", (post.getBoolean("onlySignedFiles", false)) ? "1" : "0");
      }
    }

    // version information
    final String versionstring =
        yacyBuildProperties.getVersion() + "/" + yacyBuildProperties.getSVNRevision();
    prop.putHTML("candeploy_versionpp", versionstring);
    final boolean devenvironment = new File(sb.getAppPath(), ".svn").exists();
    float thisVersion = Float.parseFloat(yacyBuildProperties.getVersion());
    // cut off the SVN Rev in the Version
    try {
      thisVersion = (float) (Math.round(thisVersion * 1000.0) / 1000.0);
    } catch (final NumberFormatException e) {
    }

    // list downloaded releases
    final File[] downloadedFiles = sb.releasePath.listFiles();
    // list can be null if RELEASE directory has been deleted manually
    final int downloadedFilesNum = (downloadedFiles == null) ? 0 : downloadedFiles.length;

    prop.put(
        "candeploy_deployenabled",
        (downloadedFilesNum == 0)
            ? "0"
            : ((devenvironment) ? "1" : "2")); // prevent that a developer-version is over-deployed

    final NavigableSet<yacyRelease> downloadedReleases = new TreeSet<yacyRelease>();
    for (final File downloaded : downloadedFiles) {
      try {
        final yacyRelease release = new yacyRelease(downloaded);
        downloadedReleases.add(release);
      } catch (final RuntimeException e) {
        // not a valid release
        // can be also a restart- or deploy-file
        final File invalid = downloaded;
        if (!(invalid.getName().endsWith(".bat")
            || invalid.getName().endsWith(".sh")
            || invalid
                .getName()
                .endsWith(".sig"))) { // Windows & Linux don't like deleted scripts while execution!
          invalid.deleteOnExit();
        }
      }
    }
    // latest downloaded release
    final yacyVersion dflt = (downloadedReleases.isEmpty()) ? null : downloadedReleases.last();
    // check if there are any downloaded releases and if there are enable the update buttons
    prop.put("candeploy_downloadsAvailable", (downloadedReleases.isEmpty()) ? "0" : "1");
    prop.put(
        "candeploy_deployenabled_buttonsActive",
        (downloadedReleases.isEmpty() || devenvironment) ? "0" : "1");

    int relcount = 0;
    for (final yacyRelease release : downloadedReleases) {
      prop.put(
          "candeploy_downloadedreleases_" + relcount + "_name",
          ((release.isMainRelease()) ? "main" : "dev")
              + " "
              + release.getReleaseNr()
              + "/"
              + release.getSvn());
      prop.put(
          "candeploy_downloadedreleases_" + relcount + "_signature",
          (release.getSignatureFile().exists() ? "1" : "0"));
      prop.putHTML("candeploy_downloadedreleases_" + relcount + "_file", release.getName());
      prop.put(
          "candeploy_downloadedreleases_" + relcount + "_selected", (release == dflt) ? "1" : "0");
      relcount++;
    }
    prop.put("candeploy_downloadedreleases", relcount);

    // list remotely available releases
    final yacyRelease.DevAndMainVersions releasess = yacyRelease.allReleases(false, false);
    relcount = 0;

    final ArrayList<yacyRelease> rlist = new ArrayList<yacyRelease>();
    final Set<yacyRelease> remoteDevReleases = releasess.dev;
    remoteDevReleases.removeAll(downloadedReleases);
    for (final yacyRelease release : remoteDevReleases) {
      rlist.add(release);
    }
    final Set<yacyRelease> remoteMainReleases = releasess.main;
    remoteMainReleases.removeAll(downloadedReleases);
    for (final yacyRelease release : remoteMainReleases) {
      rlist.add(release);
    }
    yacyRelease release;
    for (int i = rlist.size() - 1; i >= 0; i--) {
      release = rlist.get(i);
      prop.put(
          "candeploy_availreleases_" + relcount + "_name",
          ((release.isMainRelease()) ? "main" : "dev")
              + " "
              + release.getReleaseNr()
              + "/"
              + release.getSvn());
      prop.put("candeploy_availreleases_" + relcount + "_url", release.getUrl().toString());
      prop.put(
          "candeploy_availreleases_" + relcount + "_signatures",
          (release.getPublicKey() != null ? "1" : "0"));
      prop.put("candeploy_availreleases_" + relcount + "_selected", (relcount == 0) ? "1" : "0");
      relcount++;
    }

    prop.put("candeploy_availreleases", relcount);

    // properties for automated system update
    prop.put(
        "candeploy_manualUpdateChecked",
        ("manual".equals(sb.getConfig("update.process", "manual"))) ? "1" : "0");
    prop.put(
        "candeploy_autoUpdateChecked",
        ("auto".equals(sb.getConfig("update.process", "manual"))) ? "1" : "0");
    prop.put("candeploy_cycle", sb.getConfigLong("update.cycle", 168));
    prop.putHTML("candeploy_blacklist", sb.getConfig("update.blacklist", ""));
    prop.put(
        "candeploy_releaseTypeMainChecked",
        ("any".equals(sb.getConfig("update.concept", "any"))) ? "0" : "1");
    prop.put(
        "candeploy_releaseTypeAnyChecked",
        ("any".equals(sb.getConfig("update.concept", "any"))) ? "1" : "0");
    prop.put("candeploy_lastlookup", (sb.getConfigLong("update.time.lookup", 0) == 0) ? "0" : "1");
    prop.put(
        "candeploy_lastlookup_time",
        new Date(sb.getConfigLong("update.time.lookup", 0)).toString());
    prop.put(
        "candeploy_lastdownload", (sb.getConfigLong("update.time.download", 0) == 0) ? "0" : "1");
    prop.put(
        "candeploy_lastdownload_time",
        new Date(sb.getConfigLong("update.time.download", 0)).toString());
    prop.put("candeploy_lastdeploy", (sb.getConfigLong("update.time.deploy", 0) == 0) ? "0" : "1");
    prop.put(
        "candeploy_lastdeploy_time",
        new Date(sb.getConfigLong("update.time.deploy", 0)).toString());
    prop.put(
        "candeploy_onlySignedFiles",
        ("1".equals(sb.getConfig("update.onlySignedFiles", "1"))) ? "1" : "0");

    /*
    if ((adminaccess) && (yacyVersion.latestRelease >= (thisVersion+0.01))) { // only new Versions(not new SVN)
        if ((yacyVersion.latestMainRelease != null) ||
            (yacyVersion.latestDevRelease != null)) {
            prop.put("hintVersionDownload", 1);
        } else if ((post != null) && (post.containsKey("aquirerelease"))) {
            yacyVersion.aquireLatestReleaseInfo();
            prop.put("hintVersionDownload", 1);
        } else {
            prop.put("hintVersionAvailable", 1);
        }
    }
    prop.put("hintVersionAvailable", 1); // for testing

    prop.putASIS("hintVersionDownload_versionResMain", (yacyVersion.latestMainRelease == null) ? "-" : yacyVersion.latestMainRelease.toAnchor());
    prop.putASIS("hintVersionDownload_versionResDev", (yacyVersion.latestDevRelease == null) ? "-" : yacyVersion.latestDevRelease.toAnchor());
    prop.put("hintVersionAvailable_latestVersion", Float.toString(yacyVersion.latestRelease));
     */

    return prop;
  }
  /**
   * Servlet handler for the vocabulary administration page (Vocabulary_p). Supports creating a new
   * vocabulary (discovered from the index by path/title/author, or imported from a CSV file),
   * editing an existing vocabulary (add/modify/delete terms, set object space, toggle the facet
   * property, clear or delete the vocabulary), and renders the vocabulary list and the term table
   * of the selected vocabulary into template properties.
   *
   * @param header the request header (unused)
   * @param post the request parameters; may be null when the page is opened without a form post
   * @param env the server environment; expected to be the {@code Switchboard} instance
   * @return template substitution properties for the page renderer
   */
  public static serverObjects respond(
      @SuppressWarnings("unused") final RequestHeader header,
      final serverObjects post,
      final serverSwitch env) {
    final Switchboard sb = (Switchboard) env;
    final serverObjects prop = new serverObjects();
    Collection<Tagging> vocs = LibraryProvider.autotagging.getVocabularies();

    String vocabularyName = (post == null) ? null : post.get("vocabulary", null);
    String discovername = (post == null) ? null : post.get("discovername", null);
    Tagging vocabulary =
        vocabularyName == null ? null : LibraryProvider.autotagging.getVocabulary(vocabularyName);
    if (vocabulary == null) vocabularyName = null;
    if (post != null) {
      try {
        // create a vocabulary
        if (vocabulary == null && discovername != null && discovername.length() > 0) {
          // store this call as api call
          sb.tables.recordAPICall(
              post,
              "Vocabulary_p.html",
              WorkTables.TABLE_API_TYPE_CRAWLER,
              "vocabulary creation for " + discovername);
          // get details of creation
          String discoverobjectspace = post.get("discoverobjectspace", "");
          MultiProtocolURL discoveruri = null;
          if (discoverobjectspace.length() > 0)
            try {
              discoveruri = new MultiProtocolURL(discoverobjectspace);
            } catch (final MalformedURLException e) {
              // invalid object space URL: fall through with discoveruri == null
            }
          if (discoveruri == null) discoverobjectspace = "";
          Map<String, Tagging.SOTuple> table = new LinkedHashMap<String, Tagging.SOTuple>();
          File propFile = LibraryProvider.autotagging.getVocabularyFile(discovername);
          // exactly one discovery method is selected by the form's "discovermethod" radio group
          final boolean discoverNot = post.get("discovermethod", "").equals("none");
          final boolean discoverFromPath = post.get("discovermethod", "").equals("path");
          final boolean discoverFromTitle = post.get("discovermethod", "").equals("title");
          final boolean discoverFromTitleSplitted =
              post.get("discovermethod", "").equals("titlesplitted");
          final boolean discoverFromAuthor = post.get("discovermethod", "").equals("author");
          final boolean discoverFromCSV = post.get("discovermethod", "").equals("csv");
          final String discoverFromCSVPath = post.get("discoverpath", "").replaceAll("%20", " ");
          String discoverFromCSVCharset = post.get("charset", StandardCharsets.UTF_8.name());
          // column indices into the semicolon-separated CSV line; -1 disables a column
          final int discovercolumnliteral = post.getInt("discovercolumnliteral", 0);
          final int discovercolumnsynonyms = post.getInt("discovercolumnsynonyms", -1);
          final int discovercolumnobjectlink = post.getInt("discovercolumnobjectlink", -1);
          final File discoverFromCSVFile =
              discoverFromCSVPath.length() > 0 ? new File(discoverFromCSVPath) : null;
          final boolean discoverenrichsynonyms =
              post.get("discoversynonymsmethod", "none").equals("enrichsynonyms");
          final boolean discoverreadcolumn =
              post.get("discoversynonymsmethod", "none").equals("readcolumn");
          Segment segment = sb.index;
          String t;
          if (!discoverNot) {
            if (discoverFromCSV && discoverFromCSVFile != null && discoverFromCSVFile.exists()) {
              // auto-detect charset, used code from http://jchardet.sourceforge.net/; see also:
              // http://www-archive.mozilla.org/projects/intl/chardet.html
              if (discoverFromCSVCharset.equals("autodetect")) {
                List<String> charsets = FileUtils.detectCharset(discoverFromCSVFile);
                discoverFromCSVCharset = charsets.get(0);
                ConcurrentLog.info(
                    "FileUtils",
                    "detected charset: "
                        + discoverFromCSVCharset
                        + " used to read "
                        + discoverFromCSVFile.toString());
              }
              // read file (try-with-resource to close inputstream automatically)
              try (BufferedReader r =
                  new BufferedReader(
                      new InputStreamReader(
                          new FileInputStream(discoverFromCSVFile), discoverFromCSVCharset))) {
                String line = null;
                Pattern semicolon = Pattern.compile(";");
                Map<String, String> synonym2literal =
                    new HashMap<>(); // helper map to check if there are double synonyms
                while ((line = r.readLine()) != null) {
                  if (line.length() == 0) continue;
                  String[] l = semicolon.split(line);
                  if (l.length == 0) l = new String[] {line};
                  String literal =
                      discovercolumnliteral < 0 || l.length <= discovercolumnliteral
                          ? null
                          : l[discovercolumnliteral].trim();
                  if (literal == null) continue;
                  literal = normalizeLiteral(literal);
                  String objectlink =
                      discovercolumnobjectlink < 0 || l.length <= discovercolumnobjectlink
                          ? null
                          : l[discovercolumnobjectlink].trim();
                  if (literal.length() > 0) {
                    String synonyms = "";
                    if (discoverenrichsynonyms) {
                      Set<String> sy = SynonymLibrary.getSynonyms(literal);
                      if (sy != null) {
                        for (String s : sy) synonyms += "," + s;
                      }
                    } else if (discoverreadcolumn) {
                      // use "" (not null) when the synonyms column is missing or out of range:
                      // a null here would cause an NPE in normalizeLiteral / the
                      // double-synonym length() check below
                      synonyms =
                          discovercolumnsynonyms < 0 || l.length <= discovercolumnsynonyms
                              ? ""
                              : l[discovercolumnsynonyms].trim();
                      synonyms = normalizeLiteral(synonyms);
                    } else {
                      synonyms = Tagging.normalizeTerm(literal);
                    }
                    // check double synonyms
                    if (synonyms.length() > 0) {
                      String oldliteral = synonym2literal.get(synonyms);
                      if (oldliteral != null && !literal.equals(oldliteral)) {
                        // replace old entry with combined new
                        table.remove(oldliteral);
                        String newliteral = oldliteral + "," + literal;
                        literal = newliteral;
                      }
                      synonym2literal.put(synonyms, literal);
                    }
                    // store term
                    table.put(
                        literal,
                        new Tagging.SOTuple(synonyms, objectlink == null ? "" : objectlink));
                  }
                }
              }
            } else {
              // discover terms from the local index: iterate URLs below the object space
              Iterator<DigestURL> ui = segment.urlSelector(discoveruri, Long.MAX_VALUE, 100000);
              while (ui.hasNext()) {
                DigestURL u = ui.next();
                String u0 = u.toNormalform(true);
                t = "";
                if (discoverFromPath) {
                  // take the last path element (between final '/' and the file extension dot)
                  int exp = u0.lastIndexOf('.');
                  if (exp < 0) continue;
                  int slp = u0.lastIndexOf('/', exp);
                  if (slp < 0) continue;
                  t = u0.substring(slp, exp);
                  int p;
                  // strip leading "key:" / "key=" prefixes from the path element
                  while ((p = t.indexOf(':')) >= 0) t = t.substring(p + 1);
                  while ((p = t.indexOf('=')) >= 0) t = t.substring(p + 1);
                }
                if (discoverFromTitle || discoverFromTitleSplitted) {
                  URIMetadataNode m = segment.fulltext().getMetadata(u.hash());
                  if (m != null) t = m.dc_title();
                  if (t.endsWith(".jpg") || t.endsWith(".gif")) continue;
                }
                if (discoverFromAuthor) {
                  URIMetadataNode m = segment.fulltext().getMetadata(u.hash());
                  if (m != null) t = m.dc_creator();
                }
                // clean up separators and quoting so the term is a plain phrase
                t =
                    t.replaceAll("_", " ")
                        .replaceAll("\"", " ")
                        .replaceAll("'", " ")
                        .replaceAll(",", " ")
                        .replaceAll("  ", " ")
                        .trim();
                if (t.isEmpty()) continue;
                if (discoverFromTitleSplitted) {
                  String[] ts = CommonPattern.SPACES.split(t);
                  for (String s : ts) {
                    if (s.isEmpty()) continue;
                    if (s.endsWith(".jpg") || s.endsWith(".gif")) continue;
                    table.put(s, new Tagging.SOTuple(Tagging.normalizeTerm(s), u0));
                  }
                } else if (discoverFromAuthor) {
                  String[] ts =
                      CommonPattern.SEMICOLON.split(t); // author names are often separated by ';'
                  for (String s : ts) {
                    if (s.isEmpty()) continue;
                    int p =
                        s.indexOf(','); // check if there is a reversed method to mention the name
                    if (p >= 0) s = s.substring(p + 1).trim() + " " + s.substring(0, p).trim();
                    table.put(s, new Tagging.SOTuple(Tagging.normalizeTerm(s), u0));
                  }
                } else {
                  table.put(t, new Tagging.SOTuple(Tagging.normalizeTerm(t), u0));
                }
              }
            }
          }
          Tagging newvoc = new Tagging(discovername, propFile, discoverobjectspace, table);
          LibraryProvider.autotagging.addVocabulary(newvoc);
          vocabularyName = discovername;
          vocabulary = newvoc;
        } else if (vocabulary != null) {
          // check if objectspace was set
          vocabulary.setObjectspace(
              post.get(
                  "objectspace",
                  vocabulary.getObjectspace() == null ? "" : vocabulary.getObjectspace()));

          // check if a term was added
          if (post.get("add_new", "").equals("checked") && post.get("newterm", "").length() > 0) {
            String objectlink = post.get("newobjectlink", "");
            if (objectlink.length() > 0)
              try {
                objectlink = new MultiProtocolURL(objectlink).toNormalform(true);
              } catch (final MalformedURLException e) {
                // keep the raw user input if it cannot be parsed as a URL
              }
            vocabulary.put(post.get("newterm", ""), post.get("newsynonyms", ""), objectlink);
          }

          // check if a term was modified; form fields are named "modify_<term>"
          for (Map.Entry<String, String> e : post.entrySet()) {
            if (e.getKey().startsWith("modify_") && e.getValue().equals("checked")) {
              String term = e.getKey().substring(7);
              String synonyms = post.get("synonyms_" + term, "");
              String objectlink = post.get("objectlink_" + term, "");
              vocabulary.put(term, synonyms, objectlink);
            }
          }

          // check if the vocabulary shall be cleared
          if (post.get("clear_table", "").equals("checked")) {
            vocabulary.clear();
          }

          // check if the vocabulary shall be deleted
          if (post.get("delete_vocabulary", "").equals("checked")) {
            LibraryProvider.autotagging.deleteVocabulary(vocabularyName);
            vocabulary = null;
            vocabularyName = null;
          }

          // check if a term shall be deleted; form fields are named "delete_<term>"
          if (vocabulary != null && vocabulary.size() > 0)
            for (Map.Entry<String, String> e : post.entrySet()) {
              if (e.getKey().startsWith("delete_") && e.getValue().equals("checked")) {
                String term = e.getKey().substring(7);
                vocabulary.delete(term);
              }
            }

          // check the isFacet property; non-facet vocabularies are added to the omit config set
          if (vocabulary != null && post.containsKey("set")) {
            boolean isFacet = post.getBoolean("isFacet");
            vocabulary.setFacet(isFacet);
            Set<String> omit = env.getConfigSet("search.result.show.vocabulary.omit");
            if (isFacet) omit.remove(vocabularyName);
            else omit.add(vocabularyName);
            env.setConfig("search.result.show.vocabulary.omit", omit);
          }
        }
      } catch (final IOException e) {
        ConcurrentLog.logException(e);
      }
    }

    // render the vocabulary selector, marking the current/just-created vocabulary as selected
    int count = 0;
    for (Tagging v : vocs) {
      prop.put("vocabularyset_" + count + "_name", v.getName());
      prop.put(
          "vocabularyset_" + count + "_selected",
          ((vocabularyName != null && vocabularyName.equals(v.getName()))
                  || (discovername != null && discovername.equals(v.getName())))
              ? 1
              : 0);
      count++;
    }
    prop.put("vocabularyset", count);

    prop.put("create", vocabularyName == null ? 1 : 0);

    if (vocabulary == null) {
      prop.put("edit", 0);
    } else {
      // render the edit view for the selected vocabulary
      prop.put("edit", 1);
      boolean editable = vocabulary.getFile() != null && vocabulary.getFile().exists();
      prop.put("edit_editable", editable ? 1 : 0);
      prop.putHTML("edit_editable_file", editable ? vocabulary.getFile().getAbsolutePath() : "");
      prop.putHTML("edit_name", vocabulary.getName());
      prop.putXML("edit_namexml", vocabulary.getName());
      prop.putHTML("edit_namespace", vocabulary.getNamespace());
      prop.put("edit_isFacet", vocabulary.isFacet() ? 1 : 0);
      prop.put("edit_size", vocabulary.size());
      prop.putHTML("edit_predicate", vocabulary.getPredicate());
      prop.putHTML("edit_prefix", Tagging.DEFAULT_PREFIX);
      prop.putHTML(
          "edit_editable_objectspace",
          vocabulary.getObjectspace() == null ? "" : vocabulary.getObjectspace());
      prop.putHTML("edit_editable_objectspacepredicate", DCTerms.references.getPredicate());
      int c = 0;
      boolean dark = false;
      int osl = vocabulary.getObjectspace() == null ? 0 : vocabulary.getObjectspace().length();
      Map<String, SOTuple> list = vocabulary.list();
      prop.put("edit_size", list.size());
      for (Map.Entry<String, SOTuple> entry : list.entrySet()) {
        prop.put("edit_terms_" + c + "_editable", editable ? 1 : 0);
        prop.put("edit_terms_" + c + "_dark", dark ? 1 : 0);
        dark = !dark;
        // label: show the object link with the object space prefix stripped, if one applies
        prop.putXML(
            "edit_terms_" + c + "_label",
            osl > entry.getValue().getObjectlink().length()
                ? entry.getKey()
                : entry.getValue().getObjectlink().substring(osl));
        prop.putHTML("edit_terms_" + c + "_term", entry.getKey());
        prop.putXML("edit_terms_" + c + "_termxml", entry.getKey());
        prop.putHTML("edit_terms_" + c + "_editable_term", entry.getKey());
        String synonymss = entry.getValue().getSynonymsCSV();
        prop.putHTML("edit_terms_" + c + "_editable_synonyms", synonymss);
        if (synonymss.length() > 0) {
          String[] synonymsa = entry.getValue().getSynonymsList();
          for (int i = 0; i < synonymsa.length; i++) {
            prop.put("edit_terms_" + c + "_synonyms_" + i + "_altLabel", synonymsa[i]);
          }
          prop.put("edit_terms_" + c + "_synonyms", synonymsa.length);
        } else {
          prop.put("edit_terms_" + c + "_synonyms", 0);
        }
        prop.putXML("edit_terms_" + c + "_editable_objectlink", entry.getValue().getObjectlink());
        c++;
        if (c > 3000) break; // cap the rendered term table to keep the page size bounded
      }
      prop.put("edit_terms", c);
    }

    // make charset list for import method selector; "autodetect" is first and preselected
    prop.putHTML("create_charset_" + 0 + "_name", "autodetect");
    prop.put("create_charset_" + 0 + "_selected", 1);
    int c = 1;
    for (String cs : Charset.availableCharsets().keySet()) {
      prop.putHTML("create_charset_" + c + "_name", cs);
      prop.put("create_charset_" + c + "_selected", 0);
      c++;
    }
    prop.put("create_charset", c);

    // return rewrite properties
    return prop;
  }
  /**
   * Renders and sends an HTTP error response to the client using the proxymsg/error.html template.
   * The error page body is first rendered completely into a memory buffer so that an accurate
   * Content-Length header can be sent; for HEAD requests only the headers are written.
   *
   * @param conProp connection properties (HTTP version, host, path, args, method, client IP)
   * @param respond output stream to the client
   * @param errorcase either ERRORCASE_FILE or ERRORCASE_MESSAGE, selects the template branch
   * @param httpStatusCode HTTP status code to send
   * @param httpStatusText status text; when null/empty it is derived from the status code
   * @param detailedErrorMsgText error message text for the ERRORCASE_MESSAGE branch (may be null)
   * @param detailedErrorMsgFile template file reference for the ERRORCASE_FILE branch (may be null)
   * @param detailedErrorMsgValues extra template values for the ERRORCASE_FILE branch (may be null)
   * @param stackTrace optional throwable whose stack trace is printed on the error page
   * @param header response header to use; a new one is created when null
   * @throws IOException if writing to the client stream fails
   */
  private static final void sendRespondError(
      final HashMap<String, Object> conProp,
      final OutputStream respond,
      final int errorcase,
      final int httpStatusCode,
      String httpStatusText,
      final String detailedErrorMsgText,
      final Object detailedErrorMsgFile,
      final serverObjects detailedErrorMsgValues,
      final Throwable stackTrace,
      ResponseHeader header)
      throws IOException {

    FileInputStream fis = null;
    ByteArrayOutputStream o = null;
    try {
      // setting the proper http status message
      String httpVersion = (String) conProp.get(HeaderFramework.CONNECTION_PROP_HTTP_VER);
      if (httpVersion == null) httpVersion = "HTTP/1.1";
      if ((httpStatusText == null) || (httpStatusText.length() == 0)) {
        // http1_1 includes http1_0 messages
        if (HeaderFramework.http1_1.containsKey(Integer.toString(httpStatusCode)))
          httpStatusText = HeaderFramework.http1_1.get(Integer.toString(httpStatusCode));
        else httpStatusText = "Unknown";
      }

      // generating the desired request url
      String host = (String) conProp.get(HeaderFramework.CONNECTION_PROP_HOST);
      String path = (String) conProp.get(HeaderFramework.CONNECTION_PROP_PATH);
      if (path == null) path = "/";
      final String args = (String) conProp.get(HeaderFramework.CONNECTION_PROP_ARGS);
      final String method = (String) conProp.get(HeaderFramework.CONNECTION_PROP_METHOD);

      // host may carry a ":port" suffix; split it into its two parts
      final int port = Domains.stripToPort(host);
      host = Domains.stripToHostName(host);

      String urlString;
      try {
        // CONNECT requests imply https; everything else is reported as http
        urlString =
            (new DigestURL(
                    (method.equals(HeaderFramework.METHOD_CONNECT) ? "https" : "http"),
                    host,
                    port,
                    (args == null) ? path : path + "?" + args))
                .toString();
      } catch (final MalformedURLException e) {
        urlString = "invalid URL";
      }

      // set rewrite values
      final serverObjects tp = new serverObjects();

      String clientIP = (String) conProp.get(HeaderFramework.CONNECTION_PROP_CLIENTIP);
      if (clientIP == null) clientIP = Domains.LOCALHOST;

      tp.put("peerName", (switchboard.peers == null) ? "" : switchboard.peers.myName());
      tp.put("errorMessageType", Integer.toString(errorcase));
      tp.put("httpStatus", Integer.toString(httpStatusCode) + " " + httpStatusText);
      tp.put("requestMethod", (String) conProp.get(HeaderFramework.CONNECTION_PROP_METHOD));
      tp.put("requestURL", urlString);

      switch (errorcase) {
        case ERRORCASE_FILE:
          tp.put(
              "errorMessageType_file",
              (detailedErrorMsgFile == null) ? "" : detailedErrorMsgFile.toString());
          if ((detailedErrorMsgValues != null) && !detailedErrorMsgValues.isEmpty()) {
            // rewriting the value-names and add the proper name prefix:
            for (final Entry<String, String> entry : detailedErrorMsgValues.entrySet()) {
              tp.put("errorMessageType_" + entry.getKey(), entry.getValue());
            }
          }
          break;
        case ERRORCASE_MESSAGE:
        default:
          // convert plain-text newlines into HTML line breaks for the template
          tp.put(
              "errorMessageType_detailedErrorMsg",
              (detailedErrorMsgText == null)
                  ? ""
                  : detailedErrorMsgText.replaceAll("\n", "<br />"));
          break;
      }

      // building the stacktrace
      if (stackTrace != null) {
        tp.put("printStackTrace", "1");
        final ByteBuffer errorMsg = new ByteBuffer(100);
        final PrintStream printStream = new PrintStream(errorMsg);
        stackTrace.printStackTrace(printStream);
        tp.put("printStackTrace_exception", stackTrace.toString());
        tp.put("printStackTrace_stacktrace", UTF8.String(errorMsg.getBytes()));
        printStream.close();
      } else {
        tp.put("printStackTrace", "0");
      }

      // Generated Tue, 23 Aug 2005 11:19:14 GMT by brain.wg (squid/2.5.STABLE3)
      // adding some system information
      final String systemDate = HeaderFramework.formatRFC1123(new Date());
      tp.put("date", systemDate);

      // rewrite the file
      final File htRootPath =
          new File(
              switchboard.getAppPath(),
              switchboard.getConfig(
                  SwitchboardConstants.HTROOT_PATH, SwitchboardConstants.HTROOT_PATH_DEFAULT));

      // render the template into a memory buffer so Content-Length can be computed below
      TemplateEngine.writeTemplate(
          "/proxymsg/error.html",
          fis = new FileInputStream(new File(htRootPath, "/proxymsg/error.html")),
          o = new ByteArrayOutputStream(512),
          tp);
      final byte[] result = o.toByteArray();
      o.close();
      o = null; // mark as closed so the finally block does not close it twice

      if (header == null) header = new ResponseHeader(httpStatusCode);
      header.put(
          HeaderFramework.CONNECTION_PROP_PROXY_RESPOND_STATUS, Integer.toString(httpStatusCode));
      header.put(HeaderFramework.DATE, systemDate);
      header.put(HeaderFramework.CONTENT_TYPE, "text/html");
      header.put(HeaderFramework.CONTENT_LENGTH, Integer.toString(result.length));
      header.put(HeaderFramework.PRAGMA, "no-cache, no-store");
      sendRespondHeader(conProp, respond, httpVersion, httpStatusCode, httpStatusText, header);

      if (!method.equals(HeaderFramework.METHOD_HEAD)) {
        // write the array to the client
        FileUtils.copy(result, respond);
      }
      respond.flush();
    } finally {
      // best-effort cleanup; close failures are logged but not rethrown
      if (fis != null)
        try {
          fis.close();
        } catch (final Exception e) {
          ConcurrentLog.logException(e);
        }
      if (o != null)
        try {
          o.close();
        } catch (final Exception e) {
          ConcurrentLog.logException(e);
        }
    }
  }
  public static serverObjects respond(
      final RequestHeader header, final serverObjects post, final serverSwitch env) {

    // return variable that accumulates replacements
    final Switchboard sb = (Switchboard) env;

    // clean up all search events
    SearchEventCache.cleanupEvents(true);
    sb.index.clearCaches(); // every time the ranking is changed we need to remove old orderings

    // inital values for AJAX Elements (without JavaScript)
    final serverObjects prop = new serverObjects();
    prop.put("rejected", 0);

    Segment segment = sb.index;
    Fulltext fulltext = segment.fulltext();
    String localSolr = "/solr/select?core=collection1&q=*:*&start=0&rows=3";
    String remoteSolr =
        env.getConfig(SwitchboardConstants.FEDERATED_SERVICE_SOLR_INDEXING_URL, localSolr);
    if (!remoteSolr.endsWith("/")) remoteSolr = remoteSolr + "/";
    prop.put(
        "urlpublictextSolrURL",
        fulltext.connectedLocalSolr()
            ? localSolr
            : remoteSolr + "collection1/select?&q=*:*&start=0&rows=3");
    prop.putNum("urlpublictextSize", fulltext.collectionSize());
    prop.putNum("urlpublictextSegmentCount", fulltext.getDefaultConnector().getSegmentCount());
    prop.put(
        "webgraphSolrURL",
        fulltext.connectedLocalSolr()
            ? localSolr.replace("collection1", "webgraph")
            : remoteSolr + "webgraph/select?&q=*:*&start=0&rows=3");
    prop.putNum("webgraphSize", fulltext.useWebgraph() ? fulltext.webgraphSize() : 0);
    prop.putNum(
        "webgraphSegmentCount",
        fulltext.useWebgraph() ? fulltext.getWebgraphConnector().getSegmentCount() : 0);
    prop.putNum("citationSize", segment.citationCount());
    prop.putNum("citationSegmentCount", segment.citationSegmentCount());
    prop.putNum("rwipublictextSize", segment.RWICount());
    prop.putNum("rwipublictextSegmentCount", segment.RWISegmentCount());

    prop.put("list", "0");
    prop.put("loaderSize", 0);
    prop.put("loaderMax", 0);
    prop.put("list-loader", 0);

    int coreCrawlJobSize = sb.crawlQueues.coreCrawlJobSize();
    int limitCrawlJobSize = sb.crawlQueues.limitCrawlJobSize();
    int remoteTriggeredCrawlJobSize = sb.crawlQueues.remoteTriggeredCrawlJobSize();
    int noloadCrawlJobSize = sb.crawlQueues.noloadCrawlJobSize();
    int allsize =
        coreCrawlJobSize + limitCrawlJobSize + remoteTriggeredCrawlJobSize + noloadCrawlJobSize;

    prop.put("localCrawlSize", coreCrawlJobSize);
    prop.put("localCrawlState", "");
    prop.put("limitCrawlSize", limitCrawlJobSize);
    prop.put("limitCrawlState", "");
    prop.put("remoteCrawlSize", remoteTriggeredCrawlJobSize);
    prop.put("remoteCrawlState", "");
    prop.put("noloadCrawlSize", noloadCrawlJobSize);
    prop.put("noloadCrawlState", "");
    prop.put("terminate-button", allsize == 0 ? 0 : 1);
    prop.put("list-remote", 0);
    prop.put("forwardToCrawlStart", "0");

    prop.put("info", "0");
    boolean debug = (post != null && post.containsKey("debug"));

    if (post != null) {
      String c = post.toString();
      if (c.length() < 1000) ConcurrentLog.info("Crawl Start", c);
    }

    if (post != null && post.containsKey("queues_terminate_all")) {
      // terminate crawls individually
      sb.crawlQueues.noticeURL.clear();
      for (final byte[] h : sb.crawler.getActive()) {
        CrawlProfile p = sb.crawler.getActive(h);
        if (CrawlSwitchboard.DEFAULT_PROFILES.contains(p.name())) continue;
        if (p != null) sb.crawler.putPassive(h, p);
        sb.crawler.removeActive(h);
        sb.crawler.removePassive(h);
        try {
          sb.crawlQueues.noticeURL.removeByProfileHandle(p.handle(), 10000);
        } catch (SpaceExceededException e) {
        }
      }

      // clear stacks
      for (StackType stackType : StackType.values()) sb.crawlQueues.noticeURL.clear(stackType);
      try {
        sb.cleanProfiles();
      } catch (final InterruptedException e) {
        /* ignore this */
      }

      // remove pause
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
      prop.put("terminate-button", 0);
    }

    if (post != null && post.containsKey("continue")) {
      // continue queue
      final String queue = post.get("continue", "");
      if ("localcrawler".equals(queue)) {
        sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
      } else if ("remotecrawler".equals(queue)) {
        sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
        sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
      }
    }

    if (post != null && post.containsKey("pause")) {
      // pause queue
      final String queue = post.get("pause", "");
      if ("localcrawler".equals(queue)) {
        sb.pauseCrawlJob(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL,
            "user request in Crawler_p from " + header.refererHost());
      } else if ("remotecrawler".equals(queue)) {
        sb.pauseCrawlJob(
            SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL,
            "user request in Crawler_p from " + header.refererHost());
      }
    }
    String queuemessage =
        sb.getConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
    if (queuemessage.length() == 0) {
      prop.put("info-queue", 0);
    } else {
      prop.put("info-queue", 1);
      prop.putHTML("info-queue_message", "pause reason: " + queuemessage);
    }

    if (post != null && post.containsKey("terminate"))
      try {
        final String handle = post.get("handle", "");
        // termination of a crawl: shift the crawl from active to passive
        final CrawlProfile p = sb.crawler.getActive(handle.getBytes());
        if (p != null) sb.crawler.putPassive(handle.getBytes(), p);
        // delete all entries from the crawl queue that are deleted here
        sb.crawler.removeActive(handle.getBytes());
        sb.crawler.removePassive(handle.getBytes());
        sb.crawlQueues.noticeURL.removeByProfileHandle(handle, 10000);
      } catch (final SpaceExceededException e) {
        ConcurrentLog.logException(e);
      }

    // Crawl start request: read and sanitize the form parameters, then (further
    // below) build a CrawlProfile and launch the crawl in the requested mode.
    if (post != null && post.containsKey("crawlingstart")) {
      // init crawl
      if (sb.peers == null) {
        // peer environment not initialized; template info code 3 signals this
        prop.put("info", "3");
      } else {

        // remove crawlingFileContent before we record the call
        String crawlingFileName = post.get("crawlingFile");
        final File crawlingFile;
        if (crawlingFileName == null || crawlingFileName.isEmpty()) {
          crawlingFile = null;
        } else {
          // strip a "file://" scheme prefix so a plain filesystem path remains
          if (crawlingFileName.startsWith("file://"))
            crawlingFileName = crawlingFileName.substring(7);
          crawlingFile = new File(crawlingFileName);
        }
        if (crawlingFile != null && crawlingFile.exists()) {
          // the uploaded file content is not needed when the file exists locally
          post.remove("crawlingFile$file");
        }

        // prepare some filter that are adjusted in case that this is wanted
        boolean storeHTCache = "on".equals(post.get("storeHTCache", "off"));
        String newcrawlingMustMatch = post.get("mustmatch", CrawlProfile.MATCH_ALL_STRING);
        String newcrawlingMustNotMatch = post.get("mustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        if (newcrawlingMustMatch.length() < 2)
          newcrawlingMustMatch =
              CrawlProfile
                  .MATCH_ALL_STRING; // avoid that all urls are filtered out if bad value was
        // submitted
        boolean fullDomain =
            "domain".equals(post.get("range", "wide")); // special property in simple crawl start
        boolean subPath =
            "subpath".equals(post.get("range", "wide")); // special property in simple crawl start

        // deletion of old documents is only offered for restricted crawls
        final boolean restrictedcrawl =
            fullDomain || subPath || !CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch);
        final boolean deleteage = restrictedcrawl && "age".equals(post.get("deleteold", "off"));
        Date deleteageDate = null;
        if (deleteage) {
          deleteageDate =
              timeParser(
                  true,
                  post.getInt("deleteIfOlderNumber", -1),
                  post.get("deleteIfOlderUnit", "year")); // year, month, day, hour
        }
        final boolean deleteold =
            (deleteage && deleteageDate != null)
                || (restrictedcrawl && post.getBoolean("deleteold"));
        final String sitemapURLStr = post.get("sitemapURL", "");
        String crawlingStart0 = post.get("crawlingURL", "").trim(); // the crawljob start url
        // start URLs may be separated by newlines or by '|'
        String[] rootURLs0 =
            crawlingStart0.indexOf('\n') > 0 || crawlingStart0.indexOf('\r') > 0
                ? crawlingStart0.split("[\\r\\n]+")
                : crawlingStart0.split(Pattern.quote("|"));
        Set<DigestURL> rootURLs = new HashSet<DigestURL>();
        String crawlName = "";
        // the crawl name is derived from the start URLs, or from the file name
        // when a crawl-from-file was requested
        if (crawlingFile == null)
          for (String crawlingStart : rootURLs0) {
            if (crawlingStart == null || crawlingStart.length() == 0) continue;
            // add the prefix http:// if necessary
            int pos = crawlingStart.indexOf("://", 0);
            if (pos == -1) {
              if (crawlingStart.startsWith("ftp")) crawlingStart = "ftp://" + crawlingStart;
              else crawlingStart = "http://" + crawlingStart;
            }
            try {
              DigestURL crawlingStartURL = new DigestURL(crawlingStart);
              rootURLs.add(crawlingStartURL);
              crawlName +=
                  ((crawlingStartURL.getHost() == null)
                          ? crawlingStartURL.toNormalform(true)
                          : crawlingStartURL.getHost())
                      + ',';
              // local file:// and smb:// sources must not be mirrored into the HTCache
              // NOTE(review): the null check is redundant — crawlingStartURL was
              // just constructed and cannot be null here
              if (crawlingStartURL != null
                  && (crawlingStartURL.isFile() || crawlingStartURL.isSMB())) storeHTCache = false;

            } catch (final MalformedURLException e) {
              ConcurrentLog.logException(e);
            }
          }
        else {
          crawlName = crawlingFile.getName();
        }
        // strip the trailing comma from the concatenated host list
        if (crawlName.endsWith(",")) crawlName = crawlName.substring(0, crawlName.length() - 1);
        // long names get replaced by a compact synthetic name with a hash suffix
        if (crawlName.length() > 64) {
          crawlName =
              "crawl_for_"
                  + rootURLs.size()
                  + "_start_points_"
                  + Integer.toHexString(crawlName.hashCode());
          int p = crawlName.lastIndexOf(',');
          if (p >= 8) crawlName = crawlName.substring(0, p);
        }
        if (crawlName.length() == 0 && sitemapURLStr.length() > 0)
          crawlName = "sitemap loader for " + sitemapURLStr;
        // in case that a root url has a file protocol, then the site filter does not work, patch
        // that:
        if (fullDomain) {
          for (DigestURL u : rootURLs)
            if (u.isFile()) {
              fullDomain = false;
              subPath = true;
              break;
            }
        }

        // delete old robots entries
        for (DigestURL ru : rootURLs) {
          sb.robots.delete(ru);
          try {
            if (ru.getHost() != null) { // might be null for file://
              Cache.delete(RobotsTxt.robotsURL(RobotsTxt.getHostPort(ru)).hash());
            }
          } catch (IOException e) {
            // best effort: a failed cache deletion must not abort the crawl start
          }
        }
        try {
          sb.robots.clear();
        } catch (IOException e) {
          // best effort: ignore failures while clearing the robots table
        } // to be safe: clear all.

        // set the crawl filter: IP / country restrictions, index filters,
        // crawl order, depth and direct-document settings
        String ipMustMatch = post.get("ipMustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String ipMustNotMatch = post.get("ipMustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        // a too-short pattern would filter out everything; fall back to match-all
        if (ipMustMatch.length() < 2) ipMustMatch = CrawlProfile.MATCH_ALL_STRING;
        final String countryMustMatch =
            post.getBoolean("countryMustMatchSwitch") ? post.get("countryMustMatchList", "") : "";
        sb.setConfig("crawlingIPMustMatch", ipMustMatch);
        sb.setConfig("crawlingIPMustNotMatch", ipMustNotMatch);
        if (countryMustMatch.length() > 0)
          sb.setConfig("crawlingCountryMustMatch", countryMustMatch);

        String crawlerNoDepthLimitMatch =
            post.get("crawlingDepthExtension", CrawlProfile.MATCH_NEVER_STRING);
        final String indexUrlMustMatch = post.get("indexmustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String indexUrlMustNotMatch =
            post.get("indexmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
        final String indexContentMustMatch =
            post.get("indexcontentmustmatch", CrawlProfile.MATCH_ALL_STRING);
        final String indexContentMustNotMatch =
            post.get("indexcontentmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);

        final boolean crawlOrder = post.get("crawlOrder", "off").equals("on");
        env.setConfig("crawlOrder", crawlOrder);

        if (crawlOrder)
          crawlerNoDepthLimitMatch =
              CrawlProfile.MATCH_NEVER_STRING; // without limitation the crawl order does not work

        int newcrawlingdepth = post.getInt("crawlingDepth", 8);
        env.setConfig("crawlingDepth", Integer.toString(newcrawlingdepth));
        // distributed crawl order caps the depth at 8
        if ((crawlOrder) && (newcrawlingdepth > 8)) newcrawlingdepth = 8;

        boolean directDocByURL =
            "on"
                .equals(
                    post.get(
                        "directDocByURL",
                        "off")); // catch also all linked media documents without loading them
        env.setConfig("crawlingDirectDocByURL", directDocByURL);

        final String collection = post.get("collection", "user");
        env.setConfig("collection", collection);

        // recrawl
        final String recrawl = post.get("recrawl", "nodoubles"); // nodoubles, reload, scheduler
        Date crawlingIfOlder = null;
        if ("reload".equals(recrawl)) {
          crawlingIfOlder =
              timeParser(
                  true,
                  post.getInt("reloadIfOlderNumber", -1),
                  post.get("reloadIfOlderUnit", "year")); // year, month, day, hour
        }
        // Long.MAX_VALUE means "never reload" when no reload date was set
        env.setConfig(
            "crawlingIfOlder",
            crawlingIfOlder == null ? Long.MAX_VALUE : crawlingIfOlder.getTime());

        // store this call as api call
        sb.tables.recordAPICall(
            post,
            "Crawler_p.html",
            WorkTables.TABLE_API_TYPE_CRAWLER,
            "crawl start for "
                + ((rootURLs.size() == 0)
                    ? post.get("crawlingFile", "")
                    : rootURLs.iterator().next().toNormalform(true)));

        // per-domain page limit (-1 = unlimited)
        final boolean crawlingDomMaxCheck = "on".equals(post.get("crawlingDomMaxCheck", "off"));
        final int crawlingDomMaxPages =
            (crawlingDomMaxCheck) ? post.getInt("crawlingDomMaxPages", -1) : -1;
        env.setConfig("crawlingDomMaxPages", Integer.toString(crawlingDomMaxPages));

        boolean followFrames = "on".equals(post.get("followFrames", "false"));
        env.setConfig("followFrames", followFrames);

        boolean obeyHtmlRobotsNoindex = "on".equals(post.get("obeyHtmlRobotsNoindex", "false"));
        env.setConfig("obeyHtmlRobotsNoindex", obeyHtmlRobotsNoindex);

        boolean obeyHtmlRobotsNofollow = "on".equals(post.get("obeyHtmlRobotsNofollow", "false"));
        env.setConfig("obeyHtmlRobotsNofollow", obeyHtmlRobotsNofollow);

        final boolean indexText = "on".equals(post.get("indexText", "false"));
        env.setConfig("indexText", indexText);

        final boolean indexMedia = "on".equals(post.get("indexMedia", "false"));
        env.setConfig("indexMedia", indexMedia);

        env.setConfig("storeHTCache", storeHTCache);

        // choose the crawler agent; intranet mode uses a different identity
        String defaultAgentName =
            sb.isIntranetMode()
                ? ClientIdentification.yacyIntranetCrawlerAgentName
                : ClientIdentification.yacyInternetCrawlerAgentName;
        String agentName = post.get("agentName", defaultAgentName);
        ClientIdentification.Agent agent = ClientIdentification.getAgent(agentName);
        if (agent == null) agent = ClientIdentification.getAgent(defaultAgentName);

        CacheStrategy cachePolicy = CacheStrategy.parse(post.get("cachePolicy", "iffresh"));
        if (cachePolicy == null) cachePolicy = CacheStrategy.IFFRESH;

        // crawlingMode is one of: url, file, sitemap, sitelist (rewritten to url below)
        String crawlingMode = post.get("crawlingMode", "url");

        if ("file".equals(crawlingMode) && post.containsKey("crawlingFile")) {
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          directDocByURL = false;
        }

        if ("sitemap".equals(crawlingMode)) {
          // a sitemap crawl loads exactly the listed documents, no link following
          newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          newcrawlingdepth = 0;
          directDocByURL = false;
        }

        if ("sitelist".equals(crawlingMode)) {
          newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
          Set<DigestURL> newRootURLs = new HashSet<DigestURL>();
          // a sitelist crawl replaces the start URLs with all links found in the
          // listed pages and then proceeds like a normal url crawl
          for (DigestURL sitelistURL : rootURLs) {
            // download document
            Document scraper;
            try {
              scraper =
                  sb.loader.loadDocument(
                      sitelistURL, CacheStrategy.IFFRESH, BlacklistType.CRAWLER, agent);
              // get links and generate filter
              for (DigestURL u : scraper.getHyperlinks().keySet()) {
                newRootURLs.add(u);
              }
            } catch (final IOException e) {
              ConcurrentLog.logException(e);
            }
          }
          rootURLs = newRootURLs;
          crawlingMode = "url";
          if ((fullDomain || subPath) && newcrawlingdepth > 0)
            newcrawlingMustMatch =
                CrawlProfile
                    .MATCH_ALL_STRING; // to prevent that there is a restriction on the original
          // urls
        }

        // Delete all error urls for the start domains and all urls for those
        // hosts from the crawl queue. While iterating, detect whether any start
        // point is an smb/ftp resource or a PDF document; those force the
        // crawlingQ flag on below.
        Set<String> hosthashes = new HashSet<String>();
        boolean anysmbftporpdf = false;
        for (DigestURL u : rootURLs) {
          sb.index.fulltext().remove(u.hash());
          hosthashes.add(u.hosthash());
          // Compare protocols explicitly: the former substring test
          // ("smb.ftp".indexOf(protocol) >= 0) also matched any accidental
          // substring of "smb.ftp" such as "b" or "mb.f".
          final String protocol = u.getProtocol();
          if ("smb".equals(protocol)
              || "ftp".equals(protocol)
              || "pdf".equals(MultiProtocolURL.getFileExtension(u.getFileName())))
            anysmbftporpdf = true;
        }
        sb.crawlQueues.removeHosts(hosthashes);
        sb.index.fulltext().commit(true);

        // crawlingQ (accept URLs with query part) is forced on for smb/ftp/pdf
        // start points and for sitemap crawls
        boolean crawlingQ =
            anysmbftporpdf
                || "on".equals(post.get("crawlingQ", "off"))
                || "sitemap".equals(crawlingMode);
        env.setConfig("crawlingQ", crawlingQ);

        // compute mustmatch filter according to rootURLs
        if ((fullDomain || subPath) && newcrawlingdepth > 0) {
          String siteFilter = ".*";
          if (fullDomain) {
            siteFilter = CrawlProfile.siteFilter(rootURLs);
            if (deleteold) {
              sb.index.fulltext().deleteStaleDomainHashes(hosthashes, deleteageDate);
            }
          } else if (subPath) {
            siteFilter = CrawlProfile.subpathFilter(rootURLs);
            if (deleteold) {
              // delete all indexed documents below each start path
              for (DigestURL u : rootURLs) {
                String basepath = u.toNormalform(true);
                if (!basepath.endsWith("/")) {
                  int p = basepath.lastIndexOf("/");
                  if (p > 0) basepath = basepath.substring(0, p + 1);
                }
                int count = sb.index.fulltext().remove(basepath, deleteageDate);
                if (count > 0)
                  ConcurrentLog.info(
                      "Crawler_p", "deleted " + count + " documents for host " + u.getHost());
              }
            }
          }
          if (CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch)) {
            newcrawlingMustMatch = siteFilter;
          } else if (!CrawlProfile.MATCH_ALL_STRING.equals(siteFilter)) {
            // combine both
            newcrawlingMustMatch = "(" + newcrawlingMustMatch + ")|(" + siteFilter + ")";
          }
        }

        // check if the crawl filter works correctly
        try {
          Pattern mmp = Pattern.compile(newcrawlingMustMatch);
          for (DigestURL u : rootURLs) {
            assert mmp.matcher(u.toNormalform(true)).matches()
                : "pattern " + mmp.toString() + " does not match url " + u.toNormalform(true);
          }
        } catch (final PatternSyntaxException e) {
          prop.put("info", "4"); // crawlfilter does not match url
          prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
          prop.putHTML("info_error", e.getMessage());
        }

        // the crawl start data is valid if a crawl name exists and, for url mode,
        // at least one root URL was given
        boolean hasCrawlstartDataOK = !crawlName.isEmpty();
        if (hasCrawlstartDataOK) {
          // check crawlurl was given in sitecrawl
          if ("url".equals(crawlingMode) && rootURLs.size() == 0) hasCrawlstartDataOK = false;
        }

        // snapshot settings (-1 depth = no snapshots)
        String snapshotsMaxDepthString = post.get("snapshotsMaxDepth", "-1");
        int snapshotsMaxDepth = Integer.parseInt(snapshotsMaxDepthString);
        boolean snapshotsLoadImage = post.getBoolean("snapshotsLoadImage");
        boolean snapshotsReplaceOld = post.getBoolean("snapshotsReplaceOld");
        String snapshotsMustnotmatch = post.get("snapshotsMustnotmatch", "");

        // get vocabulary scraper info
        JSONObject vocabulary_scraper =
            new JSONObject(); // key = vocabulary_name, value = properties with key = type (i.e.
        // 'class') and value = keyword in context
        // collect form fields of the shape "vocabulary_<name>_class" into the
        // scraper configuration object
        for (String key : post.keySet()) {
          if (key.startsWith("vocabulary_")) {
            if (key.endsWith("_class")) {
              String vocabulary = key.substring(11, key.length() - 6);
              String value = post.get(key);
              if (value != null && value.length() > 0) {
                JSONObject props;
                try {
                  props = vocabulary_scraper.getJSONObject(vocabulary);
                } catch (JSONException e) {
                  // no entry yet for this vocabulary: create one
                  props = new JSONObject();
                  vocabulary_scraper.put(vocabulary, props);
                }
                props.put("class", value);
              }
            }
          }
        }

        int timezoneOffset = post.getInt("timezoneOffset", 0);

        // in case that we crawl from a file, load that file and re-compute mustmatch pattern
        List<AnchorURL> hyperlinks_from_file = null;
        if ("file".equals(crawlingMode)
            && post.containsKey("crawlingFile")
            && crawlingFile != null) {
          final String crawlingFileContent = post.get("crawlingFile$file", "");
          try {
            // check if the crawl filter works correctly
            final ContentScraper scraper =
                new ContentScraper(
                    new DigestURL(crawlingFile), 10000000, new VocabularyScraper(), timezoneOffset);
            final Writer writer = new TransformerWriter(null, null, scraper, null, false);
            // prefer the local file; fall back to the uploaded form content
            if (crawlingFile != null && crawlingFile.exists()) {
              FileUtils.copy(new FileInputStream(crawlingFile), writer);
            } else {
              FileUtils.copy(crawlingFileContent, writer);
            }
            writer.close();

            // get links and generate filter
            hyperlinks_from_file = scraper.getAnchors();
            if (newcrawlingdepth > 0) {
              if (fullDomain) {
                newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
              } else if (subPath) {
                newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
              }
            }
          } catch (final Exception e) {
            // error case: report the failure in the servlet template
            prop.put("info", "7"); // Error with file
            prop.putHTML("info_crawlingStart", crawlingFileName);
            prop.putHTML("info_error", e.getMessage());
            ConcurrentLog.logException(e);
          }
          sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        }

        // prepare a new crawling profile from all parameters collected above;
        // profile/handle stay null when the start data was found to be invalid
        final CrawlProfile profile;
        byte[] handle;
        if (hasCrawlstartDataOK) {
          profile =
              new CrawlProfile(
                  crawlName,
                  newcrawlingMustMatch,
                  newcrawlingMustNotMatch,
                  ipMustMatch,
                  ipMustNotMatch,
                  countryMustMatch,
                  crawlerNoDepthLimitMatch,
                  indexUrlMustMatch,
                  indexUrlMustNotMatch,
                  indexContentMustMatch,
                  indexContentMustNotMatch,
                  newcrawlingdepth,
                  directDocByURL,
                  crawlingIfOlder,
                  crawlingDomMaxPages,
                  crawlingQ,
                  followFrames,
                  obeyHtmlRobotsNoindex,
                  obeyHtmlRobotsNofollow,
                  indexText,
                  indexMedia,
                  storeHTCache,
                  crawlOrder,
                  snapshotsMaxDepth,
                  snapshotsLoadImage,
                  snapshotsReplaceOld,
                  snapshotsMustnotmatch,
                  cachePolicy,
                  collection,
                  agentName,
                  new VocabularyScraper(vocabulary_scraper),
                  timezoneOffset);
          handle = ASCII.getBytes(profile.handle());

          // before we fire up a new crawl, we make sure that another crawl with the same name is
          // not running
          sb.crawler.removeActive(handle);
          sb.crawler.removePassive(handle);
          try {
            sb.crawlQueues.noticeURL.removeByProfileHandle(profile.handle(), 10000);
          } catch (final SpaceExceededException e1) {
            // best effort: failing to purge old queue entries must not block the new crawl
          }
        } else {
          profile = null;
          handle = null;
        }

        // start the crawl in the selected mode: "url" stacks the root URLs
        // directly, "sitemap" starts a SitemapImporter, "file" enqueues the
        // links scraped from the uploaded/local file
        if ("url".equals(crawlingMode)) {
          if (rootURLs.size() == 0) {
            prop.put("info", "5"); // Crawling failed
            prop.putHTML("info_crawlingURL", "(no url given)");
            prop.putHTML("info_reasonString", "you must submit at least one crawl url");
          } else {

            // stack requests
            sb.crawler.putActive(handle, profile);
            final Set<DigestURL> successurls = new HashSet<DigestURL>();
            final Map<DigestURL, String> failurls = new HashMap<DigestURL, String>();
            sb.stackURLs(rootURLs, profile, successurls, failurls);

            if (failurls.size() == 0) {
              // liftoff!
              prop.put("info", "8");
              prop.putHTML("info_crawlingURL", post.get("crawlingURL"));

              // generate a YaCyNews if the global flag was set
              if (!sb.isRobinsonMode() && crawlOrder) {
                final Map<String, String> m =
                    new HashMap<String, String>(profile); // must be cloned
                // strip peer-local settings before publishing the profile as news
                m.remove("specificDepth");
                m.remove("indexText");
                m.remove("indexMedia");
                m.remove("remoteIndexing");
                m.remove("xsstopw");
                m.remove("xpstopw");
                m.remove("xdstopw");
                m.remove("storeTXCache");
                m.remove("storeHTCache");
                m.remove("generalFilter");
                m.remove("specificFilter");
                m.put("intention", post.get("intention", "").replace(',', '/'));
                sb.peers.newsPool.publishMyNews(
                    sb.peers.mySeed(), NewsPool.CATEGORY_CRAWL_START, m);
              }
            } else {
              // at least one root URL could not be stacked: record the failures
              // and report the collected reasons to the template
              StringBuilder fr = new StringBuilder();
              for (Map.Entry<DigestURL, String> failure : failurls.entrySet()) {
                sb.crawlQueues.errorURL.push(
                    failure.getKey(),
                    0,
                    null,
                    FailCategory.FINAL_LOAD_CONTEXT,
                    failure.getValue(),
                    -1);
                fr.append(failure.getValue()).append('/');
              }

              prop.put("info", "5"); // Crawling failed
              prop.putHTML("info_crawlingURL", (post.get("crawlingURL")));
              prop.putHTML("info_reasonString", fr.toString());
            }
            if (successurls.size() > 0)
              sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          }
        } else if ("sitemap".equals(crawlingMode)) {
          try {
            final DigestURL sitemapURL =
                sitemapURLStr.indexOf("//") > 0
                    ? new DigestURL(sitemapURLStr)
                    : new DigestURL(
                        rootURLs.iterator().next(),
                        sitemapURLStr); // fix for relative paths which should not exist but are
            // used anyway
            sb.crawler.putActive(handle, profile);
            final SitemapImporter importer = new SitemapImporter(sb, sitemapURL, profile);
            importer.start();
            sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          } catch (final Exception e) {
            // error case: report the failure in the servlet template
            prop.put("info", "6"); // Error with url
            prop.putHTML("info_crawlingStart", sitemapURLStr);
            prop.putHTML("info_error", e.getMessage());
            ConcurrentLog.logException(e);
          }
        } else if ("file".equals(crawlingMode)) {
          if (post.containsKey("crawlingFile")
              && crawlingFile != null
              && hyperlinks_from_file != null) {
            try {
              if (newcrawlingdepth > 0) {
                if (fullDomain) {
                  newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
                } else if (subPath) {
                  newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
                }
              }
              sb.crawler.putActive(handle, profile);
              sb.crawlStacker.enqueueEntriesAsynchronous(
                  sb.peers.mySeed().hash.getBytes(),
                  profile.handle(),
                  hyperlinks_from_file,
                  profile.timezoneOffset());
            } catch (final PatternSyntaxException e) {
              prop.put("info", "4"); // crawlfilter does not match url
              prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
              prop.putHTML("info_error", e.getMessage());
            } catch (final Exception e) {
              // error case: report the failure in the servlet template
              prop.put("info", "7"); // Error with file
              prop.putHTML("info_crawlingStart", crawlingFileName);
              prop.putHTML("info_error", e.getMessage());
              ConcurrentLog.logException(e);
            }
            sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
          }
        }
      }
    }

    /*
    *  <input id="customPPM" name="customPPM" type="number" min="10" max="30000" style="width:46px" value="#[customPPMdefault]#" />PPM
       <input id="latencyFactor" name="latencyFactor" type="number" min="0.1" max="3.0" step="0.1" style="width:32px" value="#[latencyFactorDefault]#" />LF
       <input id="MaxSameHostInQueue" name="MaxSameHostInQueue" type="number" min="1" max="30" style="width:32px" value="#[MaxSameHostInQueueDefault]#" />MH
       <input type="submit" name="crawlingPerformance" value="set" />
       (<a href="/Crawler_p.html?crawlingPerformance=minimum">min</a>/<a href="/Crawler_p.html?crawlingPerformance=maximum">max</a>)
       </td>
    */
    // crawl performance form: translate the wanted pages-per-minute value into
    // a busy-sleep time and a load prerequisite for the local crawl thread
    if (post != null && post.containsKey("crawlingPerformance")) {
      final String crawlingPerformance = post.get("crawlingPerformance", "custom");
      final long LCbusySleep1 =
          sb.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
      // derive the current PPM from the configured busy-sleep time
      int wantedPPM = (LCbusySleep1 == 0) ? 30000 : (int) (60000L / LCbusySleep1);
      try {
        wantedPPM = post.getInt("customPPM", wantedPPM);
      } catch (final NumberFormatException e) {
        // keep the derived default when the custom value is not a number
      }
      // NOTE(review): toLowerCase() without an explicit Locale is locale-sensitive;
      // consider toLowerCase(Locale.ROOT) for these ASCII keywords
      if ("minimum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 10;
      if ("maximum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 30000;

      // clamp to the supported range [1, 30000] PPM
      int wPPM = wantedPPM;
      if (wPPM <= 0) {
        wPPM = 1;
      }
      if (wPPM >= 30000) {
        wPPM = 30000;
      }
      final int newBusySleep = 60000 / wPPM; // for wantedPPM = 10: 6000; for wantedPPM = 1000: 60
      // higher throughput requires a higher load prerequisite threshold
      final float loadprereq =
          wantedPPM <= 10 ? 1.0f : wantedPPM <= 100 ? 2.0f : wantedPPM >= 1000 ? 8.0f : 3.0f;

      // Apply the computed busy-sleep and load-prerequisite values to the local
      // crawl thread (if running) and persist them in the configuration.
      final BusyThread thread = sb.getThread(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      if (thread != null) {
        sb.setConfig(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, thread.setBusySleep(newBusySleep));
        // setLoadPreReqisite presumably applies the value and returns it for
        // persisting; the former duplicate immediate re-call with the identical
        // argument was redundant and has been removed.
        sb.setConfig(
            SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_LOADPREREQ,
            thread.setLoadPreReqisite(loadprereq));
        thread.setIdleSleep(2000);
      }

      // persist the crawler latency factor and per-host queue limit
      float latencyFactor = post.getFloat("latencyFactor", 0.5f);
      int MaxSameHostInQueue = post.getInt("MaxSameHostInQueue", 20);
      env.setConfig(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, latencyFactor);
      env.setConfig(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, MaxSameHostInQueue);
    }

    // performance settings: export the current values as form defaults
    final long LCbusySleep =
        env.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
    // Math.max guards against division by zero for a busy-sleep of 0
    final int LCppm = (int) (60000L / Math.max(1, LCbusySleep));
    prop.put("customPPMdefault", Integer.toString(LCppm));
    prop.put(
        "latencyFactorDefault",
        env.getConfigFloat(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, 0.5f));
    prop.put(
        "MaxSameHostInQueueDefault",
        env.getConfigInt(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, 20));

    // generate crawl profile table
    int count = 0;
    boolean dark = true;
    final int domlistlength = (post == null) ? 160 : post.getInt("domlistlength", 160);
    CrawlProfile profile;
    // put active crawls into list
    String hosts = "";
    for (final byte[] h : sb.crawler.getActive()) {
      profile = sb.crawler.getActive(h);
      // skip the built-in default profiles; only user crawls are listed
      if (CrawlSwitchboard.DEFAULT_PROFILES.contains(profile.name())) continue;
      profile.putProfileEntry("crawlProfilesShow_list_", prop, true, dark, count, domlistlength);
      prop.put("crawlProfilesShow_list_" + count + "_debug", debug ? 1 : 0);
      if (debug) {
        // in debug mode also show how many URL hashes are queued per profile
        RowHandleSet urlhashes = sb.crawler.getURLHashes(h);
        prop.put(
            "crawlProfilesShow_list_" + count + "_debug_count",
            urlhashes == null ? "unknown" : Integer.toString(urlhashes.size()));
      }
      hosts = hosts + "," + profile.name();
      dark = !dark;
      count++;
    }
    prop.put("crawlProfilesShow_debug", debug ? 1 : 0);
    prop.put("crawlProfilesShow_list", count);
    prop.put("crawlProfilesShow_count", count);
    prop.put("crawlProfilesShow", count == 0 ? 0 : 1);

    prop.put("crawlProfilesShow_linkstructure", 0);

    if (post != null) { // handle config button to display graphic
      if (post.get("hidewebstructuregraph") != null)
        sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, false);
      if (post.get("showwebstructuregraph") != null)
        sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true);
    }
    // decide whether the link structure graphic can be shown for the listed crawls
    if (count > 0
        && sb.getConfigBool(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true)) {
      // collect the host names for 'wide' crawls which can be visualized
      boolean showLinkstructure = hosts.length() > 0 && !hosts.contains("file:");
      if (showLinkstructure) {
        StringBuilder q = new StringBuilder();
        // drop the leading comma produced while concatenating profile names
        hosts = hosts.substring(1);
        // query the index for documents of the host (with and without "www." prefix)
        q.append(CollectionSchema.host_s.getSolrFieldName())
            .append(':')
            .append(hosts)
            .append(" OR ")
            .append(CollectionSchema.host_s.getSolrFieldName())
            .append(':')
            .append("www.")
            .append(hosts);
        try {
          // 1 = single crawl with indexed documents, 2 = multiple crawls / no documents yet
          prop.put(
              "crawlProfilesShow_linkstructure",
              count == 1
                      && sb.index.fulltext().getDefaultConnector().getCountByQuery(q.toString()) > 0
                  ? 1
                  : 2);
          prop.put("crawlProfilesShow_linkstructure_hosts", hosts);
        } catch (IOException e) {
          // best effort: the graphic is simply not shown when the index query fails
        }
      }
    }

    // return rewrite properties
    return prop;
  }