public static serverObjects respond(
    final RequestHeader header, final serverObjects post, final serverSwitch env) {
  // return variable that accumulates replacements
  final Switchboard sb = (Switchboard) env;

  // clean up all search events
  SearchEventCache.cleanupEvents(true);
  sb.index.clearCaches(); // every time the ranking is changed we need to remove old orderings

  // initial values for AJAX elements (without JavaScript)
  final serverObjects prop = new serverObjects();
  prop.put("rejected", 0);
  Segment segment = sb.index;
  Fulltext fulltext = segment.fulltext();
  String localSolr = "/solr/select?core=collection1&q=*:*&start=0&rows=3";
  String remoteSolr = env.getConfig(SwitchboardConstants.FEDERATED_SERVICE_SOLR_INDEXING_URL, localSolr);
  if (!remoteSolr.endsWith("/")) remoteSolr = remoteSolr + "/";
  prop.put(
      "urlpublictextSolrURL",
      fulltext.connectedLocalSolr()
          ? localSolr
          : remoteSolr + "collection1/select?&q=*:*&start=0&rows=3");
  prop.putNum("urlpublictextSize", fulltext.collectionSize());
  prop.putNum("urlpublictextSegmentCount", fulltext.getDefaultConnector().getSegmentCount());
  prop.put(
      "webgraphSolrURL",
      fulltext.connectedLocalSolr()
          ? localSolr.replace("collection1", "webgraph")
          : remoteSolr + "webgraph/select?&q=*:*&start=0&rows=3");
  prop.putNum("webgraphSize", fulltext.useWebgraph() ? fulltext.webgraphSize() : 0);
  prop.putNum(
      "webgraphSegmentCount",
      fulltext.useWebgraph() ? fulltext.getWebgraphConnector().getSegmentCount() : 0);
  prop.putNum("citationSize", segment.citationCount());
  prop.putNum("citationSegmentCount", segment.citationSegmentCount());
  prop.putNum("rwipublictextSize", segment.RWICount());
  prop.putNum("rwipublictextSegmentCount", segment.RWISegmentCount());

  prop.put("list", "0");
  prop.put("loaderSize", 0);
  prop.put("loaderMax", 0);
  prop.put("list-loader", 0);

  int coreCrawlJobSize = sb.crawlQueues.coreCrawlJobSize();
  int limitCrawlJobSize = sb.crawlQueues.limitCrawlJobSize();
  int remoteTriggeredCrawlJobSize = sb.crawlQueues.remoteTriggeredCrawlJobSize();
  int noloadCrawlJobSize = sb.crawlQueues.noloadCrawlJobSize();
  int allsize = coreCrawlJobSize + limitCrawlJobSize + remoteTriggeredCrawlJobSize + noloadCrawlJobSize;

  prop.put("localCrawlSize", coreCrawlJobSize);
  prop.put("localCrawlState", "");
  prop.put("limitCrawlSize", limitCrawlJobSize);
  prop.put("limitCrawlState", "");
  prop.put("remoteCrawlSize", remoteTriggeredCrawlJobSize);
  prop.put("remoteCrawlState", "");
  prop.put("noloadCrawlSize", noloadCrawlJobSize);
  prop.put("noloadCrawlState", "");
  prop.put("terminate-button", allsize == 0 ? 0 : 1);
  prop.put("list-remote", 0);
  prop.put("forwardToCrawlStart", "0");
  prop.put("info", "0");

  boolean debug = (post != null && post.containsKey("debug"));

  if (post != null) {
    String c = post.toString();
    if (c.length() < 1000) ConcurrentLog.info("Crawl Start", c);
  }

  if (post != null && post.containsKey("queues_terminate_all")) {
    // terminate crawls individually
    sb.crawlQueues.noticeURL.clear();
    for (final byte[] h : sb.crawler.getActive()) {
      final CrawlProfile p = sb.crawler.getActive(h);
      // skip default profiles; p may be null if the profile vanished in the meantime
      if (p != null) {
        if (CrawlSwitchboard.DEFAULT_PROFILES.contains(p.name())) continue;
        sb.crawler.putPassive(h, p);
      }
      sb.crawler.removeActive(h);
      sb.crawler.removePassive(h);
      try {
        if (p != null) sb.crawlQueues.noticeURL.removeByProfileHandle(p.handle(), 10000);
      } catch (SpaceExceededException e) {
      }
    }

    // clear stacks
    for (StackType stackType : StackType.values()) sb.crawlQueues.noticeURL.clear(stackType);
    try {
      sb.cleanProfiles();
    } catch (final InterruptedException e) {
      /* ignore this */
    }

    // remove pause
    sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
    sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
    sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
    sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
    prop.put("terminate-button", 0);
  }

  if (post != null && post.containsKey("continue")) {
    // continue queue
    final String queue = post.get("continue", "");
    if ("localcrawler".equals(queue)) {
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
    } else if ("remotecrawler".equals(queue)) {
      sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
      sb.setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused_cause", "");
    }
  }

  if (post != null && post.containsKey("pause")) {
    // pause queue
    final String queue = post.get("pause", "");
    if ("localcrawler".equals(queue)) {
      sb.pauseCrawlJob(
          SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL,
          "user request in Crawler_p from " + header.refererHost());
    } else if ("remotecrawler".equals(queue)) {
      sb.pauseCrawlJob(
          SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL,
          "user request in Crawler_p from " + header.refererHost());
    }
  }

  String queuemessage = sb.getConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused_cause", "");
  if (queuemessage.length() == 0) {
    prop.put("info-queue", 0);
  } else {
    prop.put("info-queue", 1);
    prop.putHTML("info-queue_message", "pause reason: " + queuemessage);
  }

  if (post != null && post.containsKey("terminate"))
    try {
      final String handle = post.get("handle", "");
      // termination of a crawl: shift the crawl from active to passive
      final CrawlProfile p = sb.crawler.getActive(handle.getBytes());
      if (p != null) sb.crawler.putPassive(handle.getBytes(), p);
      // delete all entries from the crawl queue that are deleted here
      sb.crawler.removeActive(handle.getBytes());
      sb.crawler.removePassive(handle.getBytes());
      sb.crawlQueues.noticeURL.removeByProfileHandle(handle, 10000);
    } catch (final SpaceExceededException e) {
      ConcurrentLog.logException(e);
    }

  if (post != null && post.containsKey("crawlingstart")) {
    // init crawl
    if (sb.peers == null) {
      prop.put("info", "3");
    } else {
      // remove crawlingFileContent before we record the call
      String crawlingFileName = post.get("crawlingFile");
      final File crawlingFile;
      if (crawlingFileName == null || crawlingFileName.isEmpty()) {
        crawlingFile = null;
      } else {
        if (crawlingFileName.startsWith("file://")) crawlingFileName = crawlingFileName.substring(7);
        crawlingFile = new File(crawlingFileName);
      }
      if (crawlingFile != null && crawlingFile.exists()) {
        post.remove("crawlingFile$file");
      }

      // prepare some filters that are adjusted in case that this is wanted
      boolean storeHTCache = "on".equals(post.get("storeHTCache", "off"));
      String newcrawlingMustMatch = post.get("mustmatch", CrawlProfile.MATCH_ALL_STRING);
      String newcrawlingMustNotMatch = post.get("mustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
      // avoid that all urls are filtered out if a bad value was submitted
      if (newcrawlingMustMatch.length() < 2) newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;

      boolean fullDomain = "domain".equals(post.get("range", "wide")); // special property in simple crawl start
      boolean subPath = "subpath".equals(post.get("range", "wide")); // special property in simple crawl start

      final boolean restrictedcrawl =
          fullDomain || subPath || !CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch);
      final boolean deleteage = restrictedcrawl && "age".equals(post.get("deleteold", "off"));
      Date deleteageDate = null;
      if (deleteage) {
        deleteageDate =
            timeParser(
                true,
                post.getInt("deleteIfOlderNumber", -1),
                post.get("deleteIfOlderUnit", "year")); // year, month, day, hour
      }
      final boolean deleteold =
          (deleteage && deleteageDate != null) || (restrictedcrawl && post.getBoolean("deleteold"));

      final String sitemapURLStr = post.get("sitemapURL", "");
      String crawlingStart0 = post.get("crawlingURL", "").trim(); // the crawljob start url
      String[] rootURLs0 =
          crawlingStart0.indexOf('\n') > 0 || crawlingStart0.indexOf('\r') > 0
              ? crawlingStart0.split("[\\r\\n]+")
              : crawlingStart0.split(Pattern.quote("|"));
      Set<DigestURL> rootURLs = new HashSet<DigestURL>();
      String crawlName = "";
      if (crawlingFile == null)
        for (String crawlingStart : rootURLs0) {
          if (crawlingStart == null || crawlingStart.length() == 0) continue;
          // add the prefix http:// if necessary
          int pos = crawlingStart.indexOf("://", 0);
          if (pos == -1) {
            if (crawlingStart.startsWith("ftp")) crawlingStart = "ftp://" + crawlingStart;
            else crawlingStart = "http://" + crawlingStart;
          }
          try {
            DigestURL crawlingStartURL = new DigestURL(crawlingStart);
            rootURLs.add(crawlingStartURL);
            crawlName +=
                ((crawlingStartURL.getHost() == null)
                        ? crawlingStartURL.toNormalform(true)
                        : crawlingStartURL.getHost())
                    + ',';
            if (crawlingStartURL != null && (crawlingStartURL.isFile() || crawlingStartURL.isSMB()))
              storeHTCache = false;
          } catch (final MalformedURLException e) {
            ConcurrentLog.logException(e);
          }
        }
      else {
        crawlName = crawlingFile.getName();
      }
      if (crawlName.endsWith(",")) crawlName = crawlName.substring(0, crawlName.length() - 1);
      if (crawlName.length() > 64) {
        crawlName = "crawl_for_" + rootURLs.size() + "_start_points_" + Integer.toHexString(crawlName.hashCode());
        int p = crawlName.lastIndexOf(',');
        if (p >= 8) crawlName = crawlName.substring(0, p);
      }
      if (crawlName.length() == 0 && sitemapURLStr.length() > 0) crawlName = "sitemap loader for " + sitemapURLStr;

      // in case that a root url has a file protocol, the site filter does not work; patch that:
      if (fullDomain) {
        for (DigestURL u : rootURLs)
          if (u.isFile()) {
            fullDomain = false;
            subPath = true;
            break;
          }
      }

      // delete old robots entries
      for (DigestURL ru : rootURLs) {
        sb.robots.delete(ru);
        try {
          if (ru.getHost() != null) { // might be null for file://
            Cache.delete(RobotsTxt.robotsURL(RobotsTxt.getHostPort(ru)).hash());
          }
        } catch (IOException e) {
        }
      }
      try {
        sb.robots.clear();
      } catch (IOException e) {
      } // to be safe: clear all

      // set the crawl filter
      String ipMustMatch = post.get("ipMustmatch", CrawlProfile.MATCH_ALL_STRING);
      final String ipMustNotMatch = post.get("ipMustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
      if (ipMustMatch.length() < 2) ipMustMatch = CrawlProfile.MATCH_ALL_STRING;
      final String countryMustMatch =
          post.getBoolean("countryMustMatchSwitch") ? post.get("countryMustMatchList", "") : "";
      sb.setConfig("crawlingIPMustMatch", ipMustMatch);
      sb.setConfig("crawlingIPMustNotMatch", ipMustNotMatch);
      if (countryMustMatch.length() > 0) sb.setConfig("crawlingCountryMustMatch", countryMustMatch);

      String crawlerNoDepthLimitMatch = post.get("crawlingDepthExtension", CrawlProfile.MATCH_NEVER_STRING);
      final String indexUrlMustMatch = post.get("indexmustmatch", CrawlProfile.MATCH_ALL_STRING);
      final String indexUrlMustNotMatch = post.get("indexmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
      final String indexContentMustMatch = post.get("indexcontentmustmatch", CrawlProfile.MATCH_ALL_STRING);
      final String indexContentMustNotMatch = post.get("indexcontentmustnotmatch", CrawlProfile.MATCH_NEVER_STRING);

      final boolean crawlOrder = post.get("crawlOrder", "off").equals("on");
      env.setConfig("crawlOrder", crawlOrder);
      // without limitation the crawl order does not work
      if (crawlOrder) crawlerNoDepthLimitMatch = CrawlProfile.MATCH_NEVER_STRING;

      int newcrawlingdepth = post.getInt("crawlingDepth", 8);
      env.setConfig("crawlingDepth", Integer.toString(newcrawlingdepth));
      if ((crawlOrder) && (newcrawlingdepth > 8)) newcrawlingdepth = 8;

      // catch also all linked media documents without loading them
      boolean directDocByURL = "on".equals(post.get("directDocByURL", "off"));
      env.setConfig("crawlingDirectDocByURL", directDocByURL);

      final String collection = post.get("collection", "user");
      env.setConfig("collection", collection);

      // recrawl
      final String recrawl = post.get("recrawl", "nodoubles"); // nodoubles, reload, scheduler
      Date crawlingIfOlder = null;
      if ("reload".equals(recrawl)) {
        crawlingIfOlder =
            timeParser(
                true,
                post.getInt("reloadIfOlderNumber", -1),
                post.get("reloadIfOlderUnit", "year")); // year, month, day, hour
      }
      env.setConfig(
          "crawlingIfOlder", crawlingIfOlder == null ? Long.MAX_VALUE : crawlingIfOlder.getTime());

      // store this call as api call
      sb.tables.recordAPICall(
          post,
          "Crawler_p.html",
          WorkTables.TABLE_API_TYPE_CRAWLER,
          "crawl start for "
              + ((rootURLs.size() == 0)
                  ? post.get("crawlingFile", "")
                  : rootURLs.iterator().next().toNormalform(true)));

      final boolean crawlingDomMaxCheck = "on".equals(post.get("crawlingDomMaxCheck", "off"));
      final int crawlingDomMaxPages = (crawlingDomMaxCheck) ? post.getInt("crawlingDomMaxPages", -1) : -1;
      env.setConfig("crawlingDomMaxPages", Integer.toString(crawlingDomMaxPages));

      boolean followFrames = "on".equals(post.get("followFrames", "false"));
      env.setConfig("followFrames", followFrames);
      boolean obeyHtmlRobotsNoindex = "on".equals(post.get("obeyHtmlRobotsNoindex", "false"));
      env.setConfig("obeyHtmlRobotsNoindex", obeyHtmlRobotsNoindex);
      boolean obeyHtmlRobotsNofollow = "on".equals(post.get("obeyHtmlRobotsNofollow", "false"));
      env.setConfig("obeyHtmlRobotsNofollow", obeyHtmlRobotsNofollow);

      final boolean indexText = "on".equals(post.get("indexText", "false"));
      env.setConfig("indexText", indexText);
      final boolean indexMedia = "on".equals(post.get("indexMedia", "false"));
      env.setConfig("indexMedia", indexMedia);
      env.setConfig("storeHTCache", storeHTCache);

      String defaultAgentName =
          sb.isIntranetMode()
              ? ClientIdentification.yacyIntranetCrawlerAgentName
              : ClientIdentification.yacyInternetCrawlerAgentName;
      String agentName = post.get("agentName", defaultAgentName);
      ClientIdentification.Agent agent = ClientIdentification.getAgent(agentName);
      if (agent == null) agent = ClientIdentification.getAgent(defaultAgentName);

      CacheStrategy cachePolicy = CacheStrategy.parse(post.get("cachePolicy", "iffresh"));
      if (cachePolicy == null) cachePolicy = CacheStrategy.IFFRESH;

      String crawlingMode = post.get("crawlingMode", "url");

      if ("file".equals(crawlingMode) && post.containsKey("crawlingFile")) {
        newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
        directDocByURL = false;
      }

      if ("sitemap".equals(crawlingMode)) {
        newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
        newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
        newcrawlingdepth = 0;
        directDocByURL = false;
      }

      if ("sitelist".equals(crawlingMode)) {
        newcrawlingMustNotMatch = CrawlProfile.MATCH_NEVER_STRING;
        Set<DigestURL> newRootURLs = new HashSet<DigestURL>();
        for (DigestURL sitelistURL : rootURLs) {
          // download document
          Document scraper;
          try {
            scraper = sb.loader.loadDocument(sitelistURL, CacheStrategy.IFFRESH, BlacklistType.CRAWLER, agent);
            // get links and generate filter
            for (DigestURL u : scraper.getHyperlinks().keySet()) {
              newRootURLs.add(u);
            }
          } catch (final IOException e) {
            ConcurrentLog.logException(e);
          }
        }
        rootURLs = newRootURLs;
        crawlingMode = "url";
        // prevent that there is a restriction on the original urls
        if ((fullDomain || subPath) && newcrawlingdepth > 0) newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING;
      }

      // delete all error urls for that domain
      // and all urls for that host from the crawl queue
      Set<String> hosthashes = new HashSet<String>();
      boolean anysmbftporpdf = false;
      for (DigestURL u : rootURLs) {
        sb.index.fulltext().remove(u.hash());
        hosthashes.add(u.hosthash());
        if ("smb.ftp".indexOf(u.getProtocol()) >= 0
            || "pdf".equals(MultiProtocolURL.getFileExtension(u.getFileName())))
          anysmbftporpdf = true;
      }
      sb.crawlQueues.removeHosts(hosthashes);
      sb.index.fulltext().commit(true);

      boolean crawlingQ =
          anysmbftporpdf || "on".equals(post.get("crawlingQ", "off")) || "sitemap".equals(crawlingMode);
      env.setConfig("crawlingQ", crawlingQ);

      // compute mustmatch filter according to rootURLs
      if ((fullDomain || subPath) && newcrawlingdepth > 0) {
        String siteFilter = ".*";
        if (fullDomain) {
          siteFilter = CrawlProfile.siteFilter(rootURLs);
          if (deleteold) {
            sb.index.fulltext().deleteStaleDomainHashes(hosthashes, deleteageDate);
          }
        } else if (subPath) {
          siteFilter = CrawlProfile.subpathFilter(rootURLs);
          if (deleteold) {
            for (DigestURL u : rootURLs) {
              String basepath = u.toNormalform(true);
              if (!basepath.endsWith("/")) {
                int p = basepath.lastIndexOf("/");
                if (p > 0) basepath = basepath.substring(0, p + 1);
              }
              int count = sb.index.fulltext().remove(basepath, deleteageDate);
              if (count > 0)
                ConcurrentLog.info("Crawler_p", "deleted " + count + " documents for host " + u.getHost());
            }
          }
        }
        if (CrawlProfile.MATCH_ALL_STRING.equals(newcrawlingMustMatch)) {
          newcrawlingMustMatch = siteFilter;
        } else if (!CrawlProfile.MATCH_ALL_STRING.equals(siteFilter)) {
          // combine both
          newcrawlingMustMatch = "(" + newcrawlingMustMatch + ")|(" + siteFilter + ")";
        }
      }

      // check if the crawl filter works correctly
      try {
        Pattern mmp = Pattern.compile(newcrawlingMustMatch);
        for (DigestURL u : rootURLs) {
          assert mmp.matcher(u.toNormalform(true)).matches()
              : "pattern " + mmp.toString() + " does not match url " + u.toNormalform(true);
        }
      } catch (final PatternSyntaxException e) {
        prop.put("info", "4"); // crawlfilter does not match url
        prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
        prop.putHTML("info_error", e.getMessage());
      }

      boolean hasCrawlstartDataOK = !crawlName.isEmpty();
      if (hasCrawlstartDataOK) {
        // check that a crawl url was given in site crawl mode
        if ("url".equals(crawlingMode) && rootURLs.size() == 0) hasCrawlstartDataOK = false;
      }

      String snapshotsMaxDepthString = post.get("snapshotsMaxDepth", "-1");
      int snapshotsMaxDepth = Integer.parseInt(snapshotsMaxDepthString);
      boolean snapshotsLoadImage = post.getBoolean("snapshotsLoadImage");
      boolean snapshotsReplaceOld = post.getBoolean("snapshotsReplaceOld");
      String snapshotsMustnotmatch = post.get("snapshotsMustnotmatch", "");

      // get vocabulary scraper info
      // key = vocabulary_name, value = properties with key = type (i.e. 'class') and value = keyword in context
      JSONObject vocabulary_scraper = new JSONObject();
      for (String key : post.keySet()) {
        if (key.startsWith("vocabulary_")) {
          if (key.endsWith("_class")) {
            String vocabulary = key.substring(11, key.length() - 6);
            String value = post.get(key);
            if (value != null && value.length() > 0) {
              JSONObject props;
              try {
                props = vocabulary_scraper.getJSONObject(vocabulary);
              } catch (JSONException e) {
                props = new JSONObject();
                vocabulary_scraper.put(vocabulary, props);
              }
              props.put("class", value);
            }
          }
        }
      }

      int timezoneOffset = post.getInt("timezoneOffset", 0);

      // in case that we crawl from a file, load that file and re-compute mustmatch pattern
      List<AnchorURL> hyperlinks_from_file = null;
      if ("file".equals(crawlingMode) && post.containsKey("crawlingFile") && crawlingFile != null) {
        final String crawlingFileContent = post.get("crawlingFile$file", "");
        try {
          // check if the crawl filter works correctly
          final ContentScraper scraper =
              new ContentScraper(new DigestURL(crawlingFile), 10000000, new VocabularyScraper(), timezoneOffset);
          final Writer writer = new TransformerWriter(null, null, scraper, null, false);
          if (crawlingFile != null && crawlingFile.exists()) {
            FileUtils.copy(new FileInputStream(crawlingFile), writer);
          } else {
            FileUtils.copy(crawlingFileContent, writer);
          }
          writer.close();

          // get links and generate filter
          hyperlinks_from_file = scraper.getAnchors();
          if (newcrawlingdepth > 0) {
            if (fullDomain) {
              newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
            } else if (subPath) {
              newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
            }
          }
        } catch (final Exception e) {
          // mist
          prop.put("info", "7"); // Error with file
          prop.putHTML("info_crawlingStart", crawlingFileName);
          prop.putHTML("info_error", e.getMessage());
          ConcurrentLog.logException(e);
        }
        sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
      }

      // prepare a new crawling profile
      final CrawlProfile profile;
      byte[] handle;
      if (hasCrawlstartDataOK) {
        profile =
            new CrawlProfile(
                crawlName,
                newcrawlingMustMatch,
                newcrawlingMustNotMatch,
                ipMustMatch,
                ipMustNotMatch,
                countryMustMatch,
                crawlerNoDepthLimitMatch,
                indexUrlMustMatch,
                indexUrlMustNotMatch,
                indexContentMustMatch,
                indexContentMustNotMatch,
                newcrawlingdepth,
                directDocByURL,
                crawlingIfOlder,
                crawlingDomMaxPages,
                crawlingQ,
                followFrames,
                obeyHtmlRobotsNoindex,
                obeyHtmlRobotsNofollow,
                indexText,
                indexMedia,
                storeHTCache,
                crawlOrder,
                snapshotsMaxDepth,
                snapshotsLoadImage,
                snapshotsReplaceOld,
                snapshotsMustnotmatch,
                cachePolicy,
                collection,
                agentName,
                new VocabularyScraper(vocabulary_scraper),
                timezoneOffset);
        handle = ASCII.getBytes(profile.handle());

        // before we fire up a new crawl, we make sure that another crawl with the same name is not running
        sb.crawler.removeActive(handle);
        sb.crawler.removePassive(handle);
        try {
          sb.crawlQueues.noticeURL.removeByProfileHandle(profile.handle(), 10000);
        } catch (final SpaceExceededException e1) {
        }
      } else {
        profile = null;
        handle = null;
      }

      // start the crawl
      if ("url".equals(crawlingMode)) {
        if (rootURLs.size() == 0) {
          prop.put("info", "5"); // Crawling failed
          prop.putHTML("info_crawlingURL", "(no url given)");
          prop.putHTML("info_reasonString", "you must submit at least one crawl url");
        } else {
          // stack requests
          sb.crawler.putActive(handle, profile);
          final Set<DigestURL> successurls = new HashSet<DigestURL>();
          final Map<DigestURL, String> failurls = new HashMap<DigestURL, String>();
          sb.stackURLs(rootURLs, profile, successurls, failurls);
          if (failurls.size() == 0) {
            // liftoff!
            prop.put("info", "8");
            prop.putHTML("info_crawlingURL", post.get("crawlingURL"));

            // generate a YaCyNews if the global flag was set
            if (!sb.isRobinsonMode() && crawlOrder) {
              final Map<String, String> m = new HashMap<String, String>(profile); // must be cloned
              m.remove("specificDepth");
              m.remove("indexText");
              m.remove("indexMedia");
              m.remove("remoteIndexing");
              m.remove("xsstopw");
              m.remove("xpstopw");
              m.remove("xdstopw");
              m.remove("storeTXCache");
              m.remove("storeHTCache");
              m.remove("generalFilter");
              m.remove("specificFilter");
              m.put("intention", post.get("intention", "").replace(',', '/'));
              sb.peers.newsPool.publishMyNews(sb.peers.mySeed(), NewsPool.CATEGORY_CRAWL_START, m);
            }
          } else {
            StringBuilder fr = new StringBuilder();
            for (Map.Entry<DigestURL, String> failure : failurls.entrySet()) {
              sb.crawlQueues.errorURL.push(
                  failure.getKey(), 0, null, FailCategory.FINAL_LOAD_CONTEXT, failure.getValue(), -1);
              fr.append(failure.getValue()).append('/');
            }
            prop.put("info", "5"); // Crawling failed
            prop.putHTML("info_crawlingURL", (post.get("crawlingURL")));
            prop.putHTML("info_reasonString", fr.toString());
          }
          if (successurls.size() > 0) sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        }
      } else if ("sitemap".equals(crawlingMode)) {
        try {
          // fix for relative paths which should not exist but are used anyway
          final DigestURL sitemapURL =
              sitemapURLStr.indexOf("//") > 0
                  ? new DigestURL(sitemapURLStr)
                  : new DigestURL(rootURLs.iterator().next(), sitemapURLStr);
          sb.crawler.putActive(handle, profile);
          final SitemapImporter importer = new SitemapImporter(sb, sitemapURL, profile);
          importer.start();
          sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        } catch (final Exception e) {
          // mist
          prop.put("info", "6"); // Error with url
          prop.putHTML("info_crawlingStart", sitemapURLStr);
          prop.putHTML("info_error", e.getMessage());
          ConcurrentLog.logException(e);
        }
      } else if ("file".equals(crawlingMode)) {
        if (post.containsKey("crawlingFile") && crawlingFile != null && hyperlinks_from_file != null) {
          try {
            if (newcrawlingdepth > 0) {
              if (fullDomain) {
                newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
              } else if (subPath) {
                newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
              }
            }
            sb.crawler.putActive(handle, profile);
            sb.crawlStacker.enqueueEntriesAsynchronous(
                sb.peers.mySeed().hash.getBytes(),
                profile.handle(),
                hyperlinks_from_file,
                profile.timezoneOffset());
          } catch (final PatternSyntaxException e) {
            prop.put("info", "4"); // crawlfilter does not match url
            prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
            prop.putHTML("info_error", e.getMessage());
          } catch (final Exception e) {
            // mist
            prop.put("info", "7"); // Error with file
            prop.putHTML("info_crawlingStart", crawlingFileName);
            prop.putHTML("info_error", e.getMessage());
            ConcurrentLog.logException(e);
          }
          sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
        }
      }
    }
  }

  /*
   * <input id="customPPM" name="customPPM" type="number" min="10" max="30000" style="width:46px" value="#[customPPMdefault]#" />PPM
   * <input id="latencyFactor" name="latencyFactor" type="number" min="0.1" max="3.0" step="0.1" style="width:32px" value="#[latencyFactorDefault]#" />LF
   * <input id="MaxSameHostInQueue" name="MaxSameHostInQueue" type="number" min="1" max="30" style="width:32px" value="#[MaxSameHostInQueueDefault]#" />MH
   * <input type="submit" name="crawlingPerformance" value="set" />
   * (<a href="/Crawler_p.html?crawlingPerformance=minimum">min</a>/<a href="/Crawler_p.html?crawlingPerformance=maximum">max</a>)
   * </td>
   */
  if (post != null && post.containsKey("crawlingPerformance")) {
    final String crawlingPerformance = post.get("crawlingPerformance", "custom");
    final long LCbusySleep1 = sb.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
    int wantedPPM = (LCbusySleep1 == 0) ? 30000 : (int) (60000L / LCbusySleep1);
    try {
      wantedPPM = post.getInt("customPPM", wantedPPM);
    } catch (final NumberFormatException e) {
    }
    if ("minimum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 10;
    if ("maximum".equals(crawlingPerformance.toLowerCase())) wantedPPM = 30000;

    int wPPM = wantedPPM;
    if (wPPM <= 0) {
      wPPM = 1;
    }
    if (wPPM >= 30000) {
      wPPM = 30000;
    }
    final int newBusySleep = 60000 / wPPM; // for wantedPPM = 10: 6000; for wantedPPM = 1000: 60
    final float loadprereq =
        wantedPPM <= 10 ? 1.0f : wantedPPM <= 100 ? 2.0f : wantedPPM >= 1000 ? 8.0f : 3.0f;

    BusyThread thread;
    thread = sb.getThread(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
    if (thread != null) {
      sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, thread.setBusySleep(newBusySleep));
      sb.setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_LOADPREREQ, thread.setLoadPreReqisite(loadprereq));
      thread.setIdleSleep(2000);
    }
    float latencyFactor = post.getFloat("latencyFactor", 0.5f);
    int MaxSameHostInQueue = post.getInt("MaxSameHostInQueue", 20);
    env.setConfig(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, latencyFactor);
    env.setConfig(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, MaxSameHostInQueue);
  }

  // performance settings
  final long LCbusySleep = env.getConfigLong(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP, 1000L);
  final int LCppm = (int) (60000L / Math.max(1, LCbusySleep));
  prop.put("customPPMdefault", Integer.toString(LCppm));
  prop.put("latencyFactorDefault", env.getConfigFloat(SwitchboardConstants.CRAWLER_LATENCY_FACTOR, 0.5f));
  prop.put("MaxSameHostInQueueDefault", env.getConfigInt(SwitchboardConstants.CRAWLER_MAX_SAME_HOST_IN_QUEUE, 20));

  // generate crawl profile table
  int count = 0;
  boolean dark = true;
  final int domlistlength = (post == null) ? 160 : post.getInt("domlistlength", 160);
  CrawlProfile profile;

  // put active crawls into list
  String hosts = "";
  for (final byte[] h : sb.crawler.getActive()) {
    profile = sb.crawler.getActive(h);
    if (CrawlSwitchboard.DEFAULT_PROFILES.contains(profile.name())) continue;
    profile.putProfileEntry("crawlProfilesShow_list_", prop, true, dark, count, domlistlength);
    prop.put("crawlProfilesShow_list_" + count + "_debug", debug ? 1 : 0);
    if (debug) {
      RowHandleSet urlhashes = sb.crawler.getURLHashes(h);
      prop.put(
          "crawlProfilesShow_list_" + count + "_debug_count",
          urlhashes == null ? "unknown" : Integer.toString(urlhashes.size()));
    }
    hosts = hosts + "," + profile.name();
    dark = !dark;
    count++;
  }
  prop.put("crawlProfilesShow_debug", debug ? 1 : 0);
  prop.put("crawlProfilesShow_list", count);
  prop.put("crawlProfilesShow_count", count);
  prop.put("crawlProfilesShow", count == 0 ? 0 : 1);
  prop.put("crawlProfilesShow_linkstructure", 0);

  if (post != null) {
    // handle config button to display graphic
    if (post.get("hidewebstructuregraph") != null)
      sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, false);
    if (post.get("showwebstructuregraph") != null)
      sb.setConfig(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true);
  }

  if (count > 0 && sb.getConfigBool(SwitchboardConstants.DECORATION_GRAFICS_LINKSTRUCTURE, true)) {
    // collect the host names for 'wide' crawls which can be visualized
    boolean showLinkstructure = hosts.length() > 0 && !hosts.contains("file:");
    if (showLinkstructure) {
      StringBuilder q = new StringBuilder();
      hosts = hosts.substring(1);
      q.append(CollectionSchema.host_s.getSolrFieldName())
          .append(':')
          .append(hosts)
          .append(" OR ")
          .append(CollectionSchema.host_s.getSolrFieldName())
          .append(':')
          .append("www.")
          .append(hosts);
      try {
        prop.put(
            "crawlProfilesShow_linkstructure",
            count == 1 && sb.index.fulltext().getDefaultConnector().getCountByQuery(q.toString()) > 0 ? 1 : 2);
        prop.put("crawlProfilesShow_linkstructure_hosts", hosts);
      } catch (IOException e) {
      }
    }
  }

  // return rewrite properties
  return prop;
}
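
// Illustration (not part of the servlet above): the crawl start unions a user-supplied mustmatch
// regex with the generated site/subpath filter as "(user)|(site)" and then tests every root URL
// against the combined pattern. The following is a minimal, self-contained sketch of that
// combination step using only java.util.regex; the method name and the MATCH_ALL constant here
// are hypothetical stand-ins for CrawlProfile.MATCH_ALL_STRING.
static String combineMustMatch(String userFilter, String siteFilter) {
  final String MATCH_ALL = ".*"; // stand-in for CrawlProfile.MATCH_ALL_STRING
  if (MATCH_ALL.equals(userFilter)) return siteFilter; // no user restriction: use the site filter only
  if (MATCH_ALL.equals(siteFilter)) return userFilter; // no site restriction: keep the user filter
  return "(" + userFilter + ")|(" + siteFilter + ")";  // otherwise accept urls matching either filter
}

// Example (hypothetical values): combining a user filter with a subpath filter and checking a root URL:
// java.util.regex.Pattern.compile(combineMustMatch(".*\\.html", "https://example\\.org/docs/.*"))
//     .matcher("https://example.org/docs/index.html").matches()   // evaluates to true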
public static serverObjects respond(
    @SuppressWarnings("unused") final RequestHeader header,
    final serverObjects post,
    final serverSwitch env) {
  // return variable that accumulates replacements
  final Switchboard sb = (Switchboard) env;
  final serverObjects prop = new serverObjects();
  final Segment segment = sb.index;
  final SolrConnector connector = segment.fulltext().getDefaultConnector();

  // avoid UNRESOLVED PATTERN
  prop.put("url", "");
  prop.put("citations", 0);
  prop.put("sentences", 0);

  DigestURL uri = null;
  String url = "";
  String hash = "";
  int ch = 10;
  boolean filter = false; // show cited sentences only
  if (post != null) {
    if (post.containsKey("url")) {
      url = post.get("url");
      if (!url.startsWith("http://")
          && !url.startsWith("https://")
          && !url.startsWith("ftp://")
          && !url.startsWith("smb://")
          && !url.startsWith("file://")) {
        url = "http://" + url;
      }
    }
    if (post.containsKey("hash")) {
      hash = post.get("hash");
    }
    if (post.containsKey("ch")) {
      ch = post.getInt("ch", ch);
    }
    filter = post.getBoolean("filter");
  }
  prop.put("filter", filter);

  if (url.length() > 0) {
    try {
      uri = new DigestURL(url, null);
      hash = ASCII.String(uri.hash());
    } catch (final MalformedURLException e) {
    }
  }
  if (uri == null && hash.length() > 0) {
    try {
      uri = sb.getURL(ASCII.getBytes(hash));
      if (uri == null) {
        connector.commit(true); // try again, that url can be fresh
        uri = sb.getURL(ASCII.getBytes(hash));
      }
    } catch (IOException e) {
      ConcurrentLog.logException(e);
    }
  }
  if (uri == null) return prop; // no proper url addressed
  url = uri.toNormalform(true);
  prop.put("url", url);

  // get the document from the index
  SolrDocument doc;
  try {
    doc =
        segment
            .fulltext()
            .getDefaultConnector()
            .getDocumentById(
                hash,
                CollectionSchema.title.getSolrFieldName(),
                CollectionSchema.text_t.getSolrFieldName());
  } catch (final IOException e1) {
    return prop;
  }
  @SuppressWarnings("unchecked")
  ArrayList<String> title = (ArrayList<String>) doc.getFieldValue(CollectionSchema.title.getSolrFieldName());
  String text = (String) doc.getFieldValue(CollectionSchema.text_t.getSolrFieldName());

  ArrayList<String> sentences = new ArrayList<String>();
  if (title != null) for (String s : title) if (s.length() > 0) sentences.add(s);
  if (text != null && !text.isEmpty()) {
    SentenceReader sr = new SentenceReader(text);
    StringBuilder line;
    while (sr.hasNext()) {
      line = sr.next();
      if (line.length() > 0) sentences.add(line.toString());
    }
  }

  // for each line make a statistic about the number of occurrences somewhere else
  OrderedScoreMap<String> scores = new OrderedScoreMap<String>(null); // accumulates scores for citating urls
  LinkedHashMap<String, Set<DigestURL>> sentenceOcc = new LinkedHashMap<String, Set<DigestURL>>();
  for (String sentence : sentences) {
    if (sentence == null || sentence.length() < 40) {
      // do not count the very short sentences
      sentenceOcc.put(sentence, null);
      continue;
    }
    try {
      sentence = sentence.replace('"', '\'');
      SolrDocumentList doclist =
          connector.getDocumentListByQuery(
              "text_t:\"" + sentence + "\"",
              CollectionSchema.url_chars_i.getSolrFieldName() + " asc",
              0,
              100,
              CollectionSchema.sku.getSolrFieldName());
      int count = (int) doclist.getNumFound();
      if (count > 0) {
        Set<DigestURL> list = new TreeSet<DigestURL>();
        for (SolrDocument d : doclist) {
          String u = (String) d.getFieldValue(CollectionSchema.sku.getSolrFieldName());
          if (u == null || u.equals(url)) continue;
          scores.inc(u);
          try {
            list.add(new DigestURL(u, null));
          } catch (final MalformedURLException e) {
          }
        }
        sentenceOcc.put(sentence, list);
      }
    } catch (final Throwable ee) {
    }
  }
  sentences.clear(); // we do not need this again

  // iterate the sentences
  int i = 0;
  int sentenceNr = 0;
  for (Map.Entry<String, Set<DigestURL>> se : sentenceOcc.entrySet()) {
    Set<DigestURL> app = se.getValue();
    if (filter) {
      // prepare list, only include sentence with citation
      if (app != null && app.size() > 0) {
        StringBuilder dd = new StringBuilder(se.getKey());
        prop.put("sentences_" + i + "_dt", sentenceNr);
        dd.append("<br/>appears in:");
        for (DigestURL u : app) {
          if (u != null) {
            dd.append(" <a href=\"")
                .append(u.toNormalform(false))
                .append("\">")
                .append(u.getHost())
                .append("</a>");
          }
        }
        prop.put("sentences_" + i + "_dd", dd.toString());
        i++;
      }
    } else {
      // prepare list, include all sentences
      StringBuilder dd = new StringBuilder(se.getKey());
      prop.put("sentences_" + i + "_dt", sentenceNr);
      if (app != null && app.size() > 0) {
        dd.append("<br/>appears in:");
        for (DigestURL u : app) {
          if (u != null) {
            dd.append(" <a href=\"")
                .append(u.toNormalform(false))
                .append("\">")
                .append(u.getHost())
                .append("</a>");
          }
        }
      }
      prop.put("sentences_" + i + "_dd", dd.toString());
      i++;
    }
    sentenceNr++;
  }
  prop.put("sentences", i);

  // iterate the citations in order of number of citations
  i = 0;
  for (String u : scores.keyList(false)) {
    try {
      DigestURL uu = new DigestURL(u, null);
      prop.put("citations_" + i + "_dt", "<a href=\"" + u + "\">" + u + "</a>");
      StringBuilder dd = new StringBuilder();
      dd.append("makes ").append(Integer.toString(scores.get(u))).append(" citations: of ").append(url);
      for (Map.Entry<String, Set<DigestURL>> se : sentenceOcc.entrySet()) {
        Set<DigestURL> occurls = se.getValue();
        if (occurls != null && occurls.contains(uu))
          dd.append("<br/><a href=\"/solr/select?q=text_t:%22")
              .append(se.getKey().replace('"', '\''))
              .append("%22&rows=100&grep=&wt=grephtml\">")
              .append(se.getKey())
              .append("</a>");
      }
      prop.put("citations_" + i + "_dd", dd.toString());
      i++;
    } catch (final MalformedURLException e) {
    }
  }
  prop.put("citations", i);

  // find similar documents from different hosts
  i = 0;
  for (String u : scores.keyList(false)) {
    if (scores.get(u) < ch) continue;
    try {
      DigestURL uu = new DigestURL(u, null);
      if (uu.getOrganization().equals(uri.getOrganization())) continue;
      prop.put("similar_links_" + i + "_url", u);
      i++;
    } catch (final MalformedURLException e) {
    }
  }
  prop.put("similar_links", i);
  prop.put("similar", i > 0 ? 1 : 0);

  // return rewrite properties
  return prop;
}
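
// Illustration (not part of the servlet above): the citation search turns every sufficiently long
// sentence into an exact Solr phrase query on the text_t field, after replacing double quotes with
// single quotes so the phrase syntax stays intact. A minimal sketch of that query construction;
// the helper name is hypothetical, the field name text_t is taken from the code above.
static String buildPhraseQuery(String sentence) {
  String phrase = sentence.replace('"', '\''); // keep the surrounding quotes of the phrase query valid
  return "text_t:\"" + phrase + "\"";
}

// Example: buildPhraseQuery("He said \"hello\" to the crowd")
// yields: text_t:"He said 'hello' to the crowd"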
public static serverObjects respond(
    @SuppressWarnings("unused") final RequestHeader header,
    final serverObjects post,
    final serverSwitch env) {
  final Switchboard sb = (Switchboard) env;
  final serverObjects prop = new serverObjects();

  Collection<Tagging> vocs = LibraryProvider.autotagging.getVocabularies();

  String vocabularyName = (post == null) ? null : post.get("vocabulary", null);
  String discovername = (post == null) ? null : post.get("discovername", null);
  Tagging vocabulary = vocabularyName == null ? null : LibraryProvider.autotagging.getVocabulary(vocabularyName);
  if (vocabulary == null) vocabularyName = null;
  if (post != null) {
    try {
      // create a vocabulary
      if (vocabulary == null && discovername != null && discovername.length() > 0) {
        // store this call as api call
        sb.tables.recordAPICall(
            post,
            "Vocabulary_p.html",
            WorkTables.TABLE_API_TYPE_CRAWLER,
            "vocabulary creation for " + discovername);

        // get details of creation
        String discoverobjectspace = post.get("discoverobjectspace", "");
        MultiProtocolURL discoveruri = null;
        if (discoverobjectspace.length() > 0)
          try {
            discoveruri = new MultiProtocolURL(discoverobjectspace);
          } catch (final MalformedURLException e) {
          }
        if (discoveruri == null) discoverobjectspace = "";
        Map<String, Tagging.SOTuple> table = new LinkedHashMap<String, Tagging.SOTuple>();
        File propFile = LibraryProvider.autotagging.getVocabularyFile(discovername);
        final boolean discoverNot = post.get("discovermethod", "").equals("none");
        final boolean discoverFromPath = post.get("discovermethod", "").equals("path");
        final boolean discoverFromTitle = post.get("discovermethod", "").equals("title");
        final boolean discoverFromTitleSplitted = post.get("discovermethod", "").equals("titlesplitted");
        final boolean discoverFromAuthor = post.get("discovermethod", "").equals("author");
        final boolean discoverFromCSV = post.get("discovermethod", "").equals("csv");
        final String discoverFromCSVPath = post.get("discoverpath", "").replaceAll("%20", " ");
        String discoverFromCSVCharset = post.get("charset", StandardCharsets.UTF_8.name());
        final int discovercolumnliteral = post.getInt("discovercolumnliteral", 0);
        final int discovercolumnsynonyms = post.getInt("discovercolumnsynonyms", -1);
        final int discovercolumnobjectlink = post.getInt("discovercolumnobjectlink", -1);
        final File discoverFromCSVFile = discoverFromCSVPath.length() > 0 ? new File(discoverFromCSVPath) : null;
        final boolean discoverenrichsynonyms = post.get("discoversynonymsmethod", "none").equals("enrichsynonyms");
        final boolean discoverreadcolumn = post.get("discoversynonymsmethod", "none").equals("readcolumn");
        Segment segment = sb.index;
        String t;
        if (!discoverNot) {
          if (discoverFromCSV && discoverFromCSVFile != null && discoverFromCSVFile.exists()) {
            // auto-detect charset, used code from http://jchardet.sourceforge.net/; see also:
            // http://www-archive.mozilla.org/projects/intl/chardet.html
            if (discoverFromCSVCharset.equals("autodetect")) {
              List<String> charsets = FileUtils.detectCharset(discoverFromCSVFile);
              discoverFromCSVCharset = charsets.get(0);
              ConcurrentLog.info(
                  "FileUtils",
                  "detected charset: " + discoverFromCSVCharset + " used to read " + discoverFromCSVFile.toString());
            }

            // read file (try-with-resource to close inputstream automatically)
            try (BufferedReader r =
                new BufferedReader(
                    new InputStreamReader(new FileInputStream(discoverFromCSVFile), discoverFromCSVCharset))) {
              String line = null;
              Pattern semicolon = Pattern.compile(";");
              Map<String, String> synonym2literal = new HashMap<>(); // helper map to check if there are double synonyms
              while ((line = r.readLine()) != null) {
                if (line.length() == 0) continue;
                String[] l = semicolon.split(line);
                if (l.length == 0) l = new String[] {line};
                String literal =
                    discovercolumnliteral < 0 || l.length <= discovercolumnliteral
                        ? null
                        : l[discovercolumnliteral].trim();
                if (literal == null) continue;
                literal = normalizeLiteral(literal);
                String objectlink =
                    discovercolumnobjectlink < 0 || l.length <= discovercolumnobjectlink
                        ? null
                        : l[discovercolumnobjectlink].trim();
                if (literal.length() > 0) {
                  String synonyms = "";
                  if (discoverenrichsynonyms) {
                    Set<String> sy = SynonymLibrary.getSynonyms(literal);
                    if (sy != null) {
                      for (String s : sy) synonyms += "," + s;
                    }
                  } else if (discoverreadcolumn) {
                    synonyms =
                        discovercolumnsynonyms < 0 || l.length <= discovercolumnsynonyms
                            ? null
                            : l[discovercolumnsynonyms].trim();
                    synonyms = normalizeLiteral(synonyms);
                  } else {
                    synonyms = Tagging.normalizeTerm(literal);
                  }
                  // check double synonyms
                  if (synonyms.length() > 0) {
                    String oldliteral = synonym2literal.get(synonyms);
                    if (oldliteral != null && !literal.equals(oldliteral)) {
                      // replace old entry with combined new
                      table.remove(oldliteral);
                      String newliteral = oldliteral + "," + literal;
                      literal = newliteral;
                    }
                    synonym2literal.put(synonyms, literal);
                  }
                  // store term
                  table.put(literal, new Tagging.SOTuple(synonyms, objectlink == null ? "" : objectlink));
                }
              }
            }
          } else {
            Iterator<DigestURL> ui = segment.urlSelector(discoveruri, Long.MAX_VALUE, 100000);
            while (ui.hasNext()) {
              DigestURL u = ui.next();
              String u0 = u.toNormalform(true);
              t = "";
              if (discoverFromPath) {
                int exp = u0.lastIndexOf('.');
                if (exp < 0) continue;
                int slp = u0.lastIndexOf('/', exp);
                if (slp < 0) continue;
                t = u0.substring(slp, exp);
                int p;
                while ((p = t.indexOf(':')) >= 0) t = t.substring(p + 1);
                while ((p = t.indexOf('=')) >= 0) t = t.substring(p + 1);
              }
              if (discoverFromTitle || discoverFromTitleSplitted) {
                URIMetadataNode m = segment.fulltext().getMetadata(u.hash());
                if (m != null) t = m.dc_title();
                if (t.endsWith(".jpg") || t.endsWith(".gif")) continue;
              }
              if (discoverFromAuthor) {
                URIMetadataNode m = segment.fulltext().getMetadata(u.hash());
                if (m != null) t = m.dc_creator();
              }
              t =
                  t.replaceAll("_", " ")
                      .replaceAll("\"", " ")
                      .replaceAll("'", " ")
                      .replaceAll(",", " ")
                      .replaceAll("  ", " ")
                      .trim();
              if (t.isEmpty()) continue;
              if (discoverFromTitleSplitted) {
                String[] ts = CommonPattern.SPACES.split(t);
                for (String s : ts) {
                  if (s.isEmpty()) continue;
                  if (s.endsWith(".jpg") || s.endsWith(".gif")) continue;
                  table.put(s, new Tagging.SOTuple(Tagging.normalizeTerm(s), u0));
                }
              } else if (discoverFromAuthor) {
                String[] ts = CommonPattern.SEMICOLON.split(t); // author names are often separated by ';'
                for (String s : ts) {
                  if (s.isEmpty()) continue;
                  int p = s.indexOf(','); // check if there is a reversed method to mention the name
                  if (p >= 0) s = s.substring(p + 1).trim() + " " + s.substring(0, p).trim();
                  table.put(s, new Tagging.SOTuple(Tagging.normalizeTerm(s), u0));
                }
              } else {
                table.put(t, new Tagging.SOTuple(Tagging.normalizeTerm(t), u0));
              }
            }
          }
        }
        Tagging newvoc = new Tagging(discovername, propFile, discoverobjectspace, table);
        LibraryProvider.autotagging.addVocabulary(newvoc);
        vocabularyName = discovername;
        vocabulary = newvoc;
      } else if (vocabulary != null) {
        // check if objectspace was set
        vocabulary.setObjectspace(
            post.get("objectspace", vocabulary.getObjectspace() == null ? "" : vocabulary.getObjectspace()));

        // check if a term was added
        if (post.get("add_new", "").equals("checked") && post.get("newterm", "").length() > 0) {
          String objectlink = post.get("newobjectlink", "");
          if (objectlink.length() > 0)
            try {
              objectlink = new MultiProtocolURL(objectlink).toNormalform(true);
            } catch (final MalformedURLException e) {
            }
          vocabulary.put(post.get("newterm", ""), post.get("newsynonyms", ""), objectlink);
        }

        // check if a term was modified
        for (Map.Entry<String, String> e : post.entrySet()) {
          if (e.getKey().startsWith("modify_") && e.getValue().equals("checked")) {
            String term = e.getKey().substring(7);
            String synonyms = post.get("synonyms_" + term, "");
            String objectlink = post.get("objectlink_" + term, "");
            vocabulary.put(term, synonyms, objectlink);
          }
        }

        // check if the vocabulary shall be cleared
        if (post.get("clear_table", "").equals("checked")) {
          vocabulary.clear();
        }

        // check if the vocabulary shall be deleted
        if (post.get("delete_vocabulary", "").equals("checked")) {
          LibraryProvider.autotagging.deleteVocabulary(vocabularyName);
          vocabulary = null;
          vocabularyName = null;
        }

        // check if a term shall be deleted
        if (vocabulary != null && vocabulary.size() > 0)
          for (Map.Entry<String, String> e : post.entrySet()) {
            if (e.getKey().startsWith("delete_") && e.getValue().equals("checked")) {
              String term = e.getKey().substring(7);
              vocabulary.delete(term);
            }
          }

        // check the isFacet property
        if (vocabulary != null && post.containsKey("set")) {
          boolean isFacet = post.getBoolean("isFacet");
          vocabulary.setFacet(isFacet);
          Set<String> omit = env.getConfigSet("search.result.show.vocabulary.omit");
          if (isFacet) omit.remove(vocabularyName);
          else omit.add(vocabularyName);
          env.setConfig("search.result.show.vocabulary.omit", omit);
        }
      }
    } catch (final IOException e) {
      ConcurrentLog.logException(e);
    }
  }

  int count = 0;
  for (Tagging v : vocs) {
    prop.put("vocabularyset_" + count + "_name", v.getName());
    prop.put(
        "vocabularyset_" + count + "_selected",
        ((vocabularyName != null && vocabularyName.equals(v.getName()))
                || (discovername != null && discovername.equals(v.getName())))
            ? 1
            : 0);
    count++;
  }
  prop.put("vocabularyset", count);

  prop.put("create", vocabularyName == null ? 1 : 0);

  if (vocabulary == null) {
    prop.put("edit", 0);
  } else {
    prop.put("edit", 1);
    boolean editable = vocabulary.getFile() != null && vocabulary.getFile().exists();
    prop.put("edit_editable", editable ? 1 : 0);
    prop.putHTML("edit_editable_file", editable ? vocabulary.getFile().getAbsolutePath() : "");
    prop.putHTML("edit_name", vocabulary.getName());
    prop.putXML("edit_namexml", vocabulary.getName());
    prop.putHTML("edit_namespace", vocabulary.getNamespace());
    prop.put("edit_isFacet", vocabulary.isFacet() ? 1 : 0);
    prop.put("edit_size", vocabulary.size());
    prop.putHTML("edit_predicate", vocabulary.getPredicate());
    prop.putHTML("edit_prefix", Tagging.DEFAULT_PREFIX);
    prop.putHTML(
        "edit_editable_objectspace", vocabulary.getObjectspace() == null ? "" : vocabulary.getObjectspace());
    prop.putHTML("edit_editable_objectspacepredicate", DCTerms.references.getPredicate());
    int c = 0;
    boolean dark = false;
    int osl = vocabulary.getObjectspace() == null ? 0 : vocabulary.getObjectspace().length();
    Map<String, SOTuple> list = vocabulary.list();
    prop.put("edit_size", list.size());
    for (Map.Entry<String, SOTuple> entry : list.entrySet()) {
      prop.put("edit_terms_" + c + "_editable", editable ? 1 : 0);
      prop.put("edit_terms_" + c + "_dark", dark ? 1 : 0);
      dark = !dark;
      prop.putXML(
          "edit_terms_" + c + "_label",
          osl > entry.getValue().getObjectlink().length()
              ? entry.getKey()
              : entry.getValue().getObjectlink().substring(osl));
      prop.putHTML("edit_terms_" + c + "_term", entry.getKey());
      prop.putXML("edit_terms_" + c + "_termxml", entry.getKey());
      prop.putHTML("edit_terms_" + c + "_editable_term", entry.getKey());
      String synonymss = entry.getValue().getSynonymsCSV();
      prop.putHTML("edit_terms_" + c + "_editable_synonyms", synonymss);
      if (synonymss.length() > 0) {
        String[] synonymsa = entry.getValue().getSynonymsList();
        for (int i = 0; i < synonymsa.length; i++) {
          prop.put("edit_terms_" + c + "_synonyms_" + i + "_altLabel", synonymsa[i]);
        }
        prop.put("edit_terms_" + c + "_synonyms", synonymsa.length);
      } else {
        prop.put("edit_terms_" + c + "_synonyms", 0);
      }
      prop.putXML("edit_terms_" + c + "_editable_objectlink", entry.getValue().getObjectlink());
      c++;
      if (c > 3000) break;
    }
    prop.put("edit_terms", c);
  }

  // make charset list for import method selector
  prop.putHTML("create_charset_" + 0 + "_name", "autodetect");
  prop.put("create_charset_" + 0 + "_selected", 1);
  int c = 1;
  for (String cs : Charset.availableCharsets().keySet()) {
    prop.putHTML("create_charset_" + c + "_name", cs);
    prop.put("create_charset_" + c + "_selected", 0);
    c++;
  }
  prop.put("create_charset", c);

  // return rewrite properties
  return prop;
}
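
// Illustration (not part of the servlet above): the CSV vocabulary import splits each line at ';'
// and picks the literal, synonym and object-link columns by index, skipping lines whose literal
// column is missing. A minimal, self-contained sketch of that column selection under the same
// assumptions; the method name and the String[] return layout {literal, synonyms, objectlink}
// are hypothetical and only for illustration.
static String[] parseCsvLine(String line, int literalCol, int synonymsCol, int objectlinkCol) {
  String[] l = line.split(";");
  if (l.length == 0) l = new String[] {line};
  String literal = (literalCol < 0 || l.length <= literalCol) ? null : l[literalCol].trim();
  if (literal == null || literal.isEmpty()) return null; // nothing to import from this line
  String synonyms = (synonymsCol < 0 || l.length <= synonymsCol) ? "" : l[synonymsCol].trim();
  String objectlink = (objectlinkCol < 0 || l.length <= objectlinkCol) ? "" : l[objectlinkCol].trim();
  return new String[] {literal, synonyms, objectlink};
}

// Example: parseCsvLine("Berlin;Hauptstadt,Spree;https://example.org/Berlin", 0, 1, 2)
// yields {"Berlin", "Hauptstadt,Spree", "https://example.org/Berlin"}.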