Example #1
/**
 * RTF text extractor
 *
 * @author Wouter Heijke
 * @version $Id$
 */
public class SwingRTFExtractor implements Extractor {
  private static final Logger log = Logging.getLoggerInstance(SwingRTFExtractor.class);

  private String mimetype = "application/rtf";

  @Override
  public void setMimeType(String mimetype) {
    this.mimetype = mimetype;
  }

  @Override
  public String getMimeType() {
    return this.mimetype;
  }

  @Override
  public String extract(InputStream input) throws Exception {
    log.debug("extract stream");
    String result = null;
    DefaultStyledDocument styledDoc = new DefaultStyledDocument();
    try {
      new RTFEditorKit().read(input, styledDoc, 0);
      result = styledDoc.getText(0, styledDoc.getLength());
    } catch (IOException e) {
      throw new Exception("Cannot extract text from a RTF document", e);
    } catch (BadLocationException e) {
      throw new Exception("Cannot extract text from a RTF document", e);
    }
    return result;
  }
}
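
A minimal usage sketch of the extractor above, assuming an RTF file on disk; the demo class and file name are hypothetical, only the Extractor interface and extract(InputStream) come from the code above.

import java.io.FileInputStream;
import java.io.InputStream;

public class SwingRTFExtractorDemo {
  public static void main(String[] args) throws Exception {
    Extractor extractor = new SwingRTFExtractor();
    // "document.rtf" is a placeholder; extract() reads the stream and returns the plain text
    try (InputStream in = new FileInputStream("document.rtf")) {
      System.out.println(extractor.extract(in));
    }
  }
}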
Example #2
/**
 * @author Michiel Meeuwissen
 * @since MMBase-1.9.2
 */
public class BitrateLabeler extends Labeler {
  private static final Logger log = Logging.getLoggerInstance(BitrateLabeler.class);
  private static final String CONFIG_TAG = MainFilter.FILTERCONFIG_TAG + ".bitrates";

  private final Map<String, BitrateInfo> bitrates = new LinkedHashMap<String, BitrateInfo>();

  private String key = "bitrate";

  private boolean overwrite = true;

  public void setKey(String k) {
    key = k;
  }

  public void setOverwrite(boolean o) {
    overwrite = o;
  }

  @Override
  public void configure(DocumentReader reader, Element element) {
    bitrates.clear();
    try {
      for (Element bitrate :
          DocumentReader.getChildElements(reader.getElementByPath(element, CONFIG_TAG))) {
        BitrateInfo bri = new BitrateInfo(bitrate);
        log.debug("Adding BitrateInfo " + bri);
        bitrates.put(bri.getName(), bri);
      }
    } catch (Exception ex) {
      log.error("Error in filter.xml:" + ex, ex);
    }
    log.info("Configured bit rate labeler " + bitrates);
    FilterUtils.propertiesConfigure(this, reader, element);
  }

  @Override
  protected void label(URLComposer uc) {
    for (Map.Entry<String, BitrateInfo> entry : bitrates.entrySet()) {
      int bitrate = uc.getSource().getIntValue("bitrate");
      if (entry.getValue().matches(bitrate)) {
        log.debug("" + bitrate + " matched " + entry);
        if (overwrite || !uc.getInfo().containsKey(key)) {
          uc.getInfo().put(key, entry.getKey());
        }
      }
    }
  }
}
Example #3
/**
 * The resources builder can be used by {@link org.mmbase.util.ResourceLoader} to load resources
 * (configuration files, classes, resource bundles) from MMBase.
 *
 * @author Michiel Meeuwissen
 * @version $Id$
 * @since MMBase-1.8
 */
public class Resources extends Attachments {
  private static final Logger log = Logging.getLoggerInstance(Resources.class);

  /** Implements virtual filename field. {@inheritDoc} */
  @Override
  public Object getValue(MMObjectNode node, String field) {
    if (field.equals(NodeURLStreamHandlerFactory.FILENAME_FIELD)) {
      String s = node.getStringValue(NodeURLStreamHandlerFactory.RESOURCENAME_FIELD);
      int i = s.lastIndexOf("/");
      if (i > 0) {
        return s.substring(i + 1);
      } else {
        return s;
      }
    } else {
      return super.getValue(node, field);
    }
  }
}
Example #4
/**
 * A VWM that manages files by scheduling them to be sent to one or more mirror sites. Requests
 * for scheduling are made in the netfile builder. This VWM handles those netfile requests whose
 * service is 'pages'. Available subservices are 'main' and 'mirror'. Requests for file copy are
 * checked periodically. This results in one or more requests for a 'mirror' service, which then
 * result in a file copy request, which is handled in a separate thread. This VWM also has methods
 * for recalculating pages and handling page changes (which in turn result in a request for file
 * copy). Entry points for these requests are the fileChange methods from the {@link
 * VwmServiceInterface}.
 *
 * @author Daniel Ockeloen
 * @author Pierre van Rooden (javadocs)
 * @version $Id$
 */
public class PageMaster extends Vwm implements MMBaseObserver, VwmServiceInterface {

  private static final Logger log = Logging.getLoggerInstance(PageMaster.class);

  // field used to skip first probeCall (why???)
  boolean first = true;

  Object syncobj = new Object(); // used in commented code

  /** Queue containing the file-copy tasks that need to be performed by {@link #filecopier} */
  Queue files2copy = new Queue(128);
  /** Thread that handles the actual file transfers. */
  FileCopier filecopier = new FileCopier(files2copy);
  /** Cache for mirror servers */
  Vector mirrornodes;

  // Hashtable properties; (unused)

  /** Constructor for the PageMaster VWM. */
  public PageMaster() {
    log.debug("ready for action");
  }

  /**
   * Performs general periodic maintenance. This routine handles all open pages/main and
   * pages/mirror file service requests. These requests are obtained from the netfiles builder. For
   * each file that should be serviced, the fileChange method is called. This routine handles a
   * maximum of 10 pages/main and 50 pages/mirror service calls each time it is called. The first
   * time this method is called, nothing happens (?).
   *
   * @return <code>true</code> if maintenance was performed, <code>false</code> otherwise
   */
  public boolean probeCall() {
    if (first) {
      // skip first time this method is called
      first = false;
    } else {
      // handle up to 10 pages/main fileservice requests
      try {
        Netfiles bul = (Netfiles) Vwms.getMMBase().getMMObject("netfiles");
        // Enumeration e=bul.search("WHERE service='pages' AND subservice='main' AND
        // status="+Netfiles.STATUS_REQUEST+" ORDER BY number DESC");
        Enumeration e =
            bul.search("service=='pages'+subservice=='main'+status=" + Netfiles.STATUS_REQUEST);
        int i = 0;
        while (e.hasMoreElements() && i < 10) {
          MMObjectNode node = (MMObjectNode) e.nextElement();
          fileChange("" + node.getIntValue("number"), "c");
          i++;
        }
      } catch (Exception e) {
        log.error(Logging.stackTrace(e));
      }
      // handle up to 50 pages/mirror fileservice requests
      try {
        Netfiles bul = (Netfiles) Vwms.getMMBase().getMMObject("netfiles");
        Enumeration e =
            bul.search("service=='pages'+subservice=='mirror'+status=" + Netfiles.STATUS_REQUEST);
        // Enumeration e=bul.search("WHERE service='pages' AND subservice='mirror' AND
        // status="+Netfiles.STATUS_REQUEST+" ORDER BY number DESC");
        int i = 0;
        while (e.hasMoreElements() && i < 50) {
          MMObjectNode node = (MMObjectNode) e.nextElement();
          fileChange("" + node.getIntValue("number"), "c");
          i++;
        }
      } catch (Exception e) {
        log.error(Logging.stackTrace(e));
      }
    }
    return true;
  }

  /**
   * Called when a remote node is changed.
   *
   * @param machine Name of the machine that changed the node.
   * @param number Number of the changed node as a <code>String</code>
   * @param builder type of the changed node
   * @param ctype command type, 'c'=changed, 'd'=deleted, 'r'=relations changed, 'n'=new
   * @return <code>true</code>
   */
  public boolean nodeRemoteChanged(String machine, String number, String builder, String ctype) {
    return nodeChanged(machine, number, builder, ctype);
  }

  /**
   * Called when a local node is changed.
   *
   * @param machine Name of the machine that changed the node.
   * @param number Number of the changed node as a <code>String</code>
   * @param builder type of the changed node
   * @param ctype command type, 'c'=changed, 'd'=deleted, 'r'=relations changed, 'n'=new
   * @return <code>true</code>
   */
  public boolean nodeLocalChanged(String machine, String number, String builder, String ctype) {
    return nodeChanged(machine, number, builder, ctype);
  }

  /**
   * Called when a local or remote node is changed. Does not take any action.
   *
   * @param machine Name of the machine that changed the node.
   * @param number Number of the changed node as a <code>String</code>
   * @param builder type of the changed node
   * @param ctype command type, 'c'=changed, 'd'=deleted, 'r'=relations changed, 'n'=new
   * @return <code>true</code>
   */
  public boolean nodeChanged(String machine, String number, String builder, String ctype) {
    // log.debug("sees that : "+number+" has changed type="+ctype);
    return true;
  }

  /**
   * Schedules a service-request on a file. Only "pages/main" services are handled. The
   * service-request is later handled through the {@link #probeCall} method.
   *
   * @param service the service to be performed
   * @param subservice the subservice to be performed
   * @param filename the filename to service
   * @return <code>true</code> if maintenance was performed, <code>false</code> otherwise
   */
  public boolean fileChange(String service, String subservice, String filename) {
    log.debug("frontend change -> " + filename);
    log.service("s=" + service + " sub=" + subservice + "file=" + filename);
    // jump to correct subhandles based on the subservice
    if (subservice.equals("main")) {
      handleMainCheck(service, subservice, filename);
    }
    return true;
  }

  /**
   * Handles a service-request on a file, registered in the netfiles builder. Depending on the
   * subservice requested, this routine calls {@link #handleMirror} or {@link #handleMain}.
   *
   * @param number Number of the node in the netfiles builder that contains the service request
   *     information.
   * @param ctype the type of change on that node ("c" : node was changed)
   * @return <code>true</code>
   */
  public boolean fileChange(String number, String ctype) {
    // log.debug("fileChange="+number+" "+ctype);
    // first get the change node so we can see what is the matter with it.
    Netfiles bul = (Netfiles) Vwms.getMMBase().getMMObject("netfiles");
    MMObjectNode filenode = bul.getNode(number);
    if (filenode != null) {
      // obtain all the basic info on the file.
      String service = filenode.getStringValue("service");
      String subservice = filenode.getStringValue("subservice");
      int status = filenode.getIntValue("status");

      // jump to correct subhandles based on the subservice
      if (subservice.equals("main")) {
        return handleMain(filenode, status, ctype);
      } else if (subservice.equals("mirror")) {
        return handleMirror(filenode, status, ctype);
      }
    }
    return true;
  }

  /**
   * Handles a pages/mirror service request. Places a page in the files2copy queue, so it will be
   * sent to a mirror site by the FileCopier.
   *
   * @param filenode the netfiles node that contains the service request
   * @param status the current status of the node
   * @param ctype the type of change on that node ("c" : node was changed)
   * @return <code>true</code>
   */
  public boolean handleMirror(MMObjectNode filenode, int status, String ctype) {
    switch (status) {
      case Netfiles.STATUS_REQUEST: // Request
        // register the node as being On Its Way
        filenode.setValue("status", Netfiles.STATUS_ON_ITS_WAY);
        filenode.commit();
        String filename = filenode.getStringValue("filename");
        String dstserver = filenode.getStringValue("mmserver");
        // recover the correct source/dest properties for this mirror
        //
        // why does it say "demoserver" ??
        //
        String sshpath = getProperty("demoserver", "sshpath");
        log.debug("sshpath=" + sshpath);
        String srcpath = getProperty("demoserver", "path");
        log.debug("srcpath=" + srcpath);
        String dstuser = getProperty(dstserver, "user");
        log.debug("dstuser="******"host");
        log.debug("dsthost=" + dsthost);
        String dstpath = getProperty(dstserver, "path");
        log.debug("dstpath=" + dstpath);

        /* this code can be dropped as it is handled in FileCopier

                SCPcopy scpcopy=new SCPcopy(sshpath,dstuser,dsthost,dstpath);

                synchronized(syncobj) {
                    scpcopy.copy(srcpath,filename);
                }
        */
        // create a new file2copy object and add it to the queue,
        // so the FileCopier thread will handle it.
        files2copy.append(new aFile2Copy(dstuser, dsthost, dstpath, srcpath, filename, sshpath));

        // register the node as being Done
        filenode.setValue("status", Netfiles.STATUS_DONE);
        filenode.commit();
        break;
      case Netfiles.STATUS_ON_ITS_WAY: // On its way
        break;
      case Netfiles.STATUS_DONE: // Done
        break;
    }
    return true;
  }

  /**
   * Handles a pages/main service request. The events handled are:<br>
   * - requests for handling: schedules requests to mirror this page using {@link #doMainRequest}
   * <br>
   * - changed: page is scheduled to be recalculated<br>
   * - recaculate" page is recaclcutated and scheduled to be handled<br>
   *
   * @param filenode the netfiles node that contains the service request
   * @param status the current status of the node
   * @param ctype the type of change on that node ("c" : node was changed)
   * @return <code>true</code>
   */
  public boolean handleMain(MMObjectNode filenode, int status, String ctype) {
    switch (status) {
      case Netfiles.STATUS_REQUEST: // Request
        // register the node as being On Its Way
        filenode.setValue("status", Netfiles.STATUS_ON_ITS_WAY);
        filenode.commit();
        // do stuff
        doMainRequest(filenode);
        // register the node as being Done
        filenode.setValue("status", Netfiles.STATUS_DONE);
        filenode.commit();
        break;
      case Netfiles.STATUS_ON_ITS_WAY: // On Its Way
        break;
      case Netfiles.STATUS_DONE: // Done
        break;
      case Netfiles.STATUS_CHANGED: // Dirty (?)
        filenode.setValue("status", Netfiles.STATUS_CALC_PAGE);
        filenode.commit();
        break;
      case Netfiles.STATUS_CALC_PAGE: // Recalculate Page
        String filename = filenode.getStringValue("filename");
        calcPage(filename);
        filenode.setValue("status", Netfiles.STATUS_REQUEST);
        filenode.commit();
        break;
    }
    return true;
  }

  /**
   * Handles a main subservice on a page. The page is scheduled to be sent to all appropriate
   * mirror sites for this service, by setting the request status in the associated mirror nodes.
   * If no mirror nodes are associated with this page, nothing happens.
   *
   * @param filenode the netfiles node with the original (main) request
   * @return <code>true</code>
   */
  public boolean doMainRequest(MMObjectNode filenode) {
    // so this file has changed probably, check if the file is ready on
    // disk and set the mirrors to request.
    String filename = filenode.getStringValue("filename");

    // find and change all the mirror nodes so they get resend
    Netfiles bul = (Netfiles) Vwms.getMMBase().getMMObject("netfiles");
    Enumeration e =
        bul.search("WHERE filename='" + filename + "' AND service='pages' AND subservice='mirror'");
    while (e.hasMoreElements()) {
      MMObjectNode mirrornode = (MMObjectNode) e.nextElement();
      mirrornode.setValue("status", Netfiles.STATUS_REQUEST);
      mirrornode.commit();
    }
    return true;
  }

  /**
   * Schedules a netfile object to be sent to its mirror sites. The routine searches the
   * appropriate netfile node, and sets its status to 'request'. If a node does not exist, a new
   * node is created. In the latter case, the system also creates mirror nodes for each mirror
   * site associated with this service.
   *
   * @param service the service to be performed
   * @param subservice the subservice to be performed
   * @param filename the filename to service
   */
  public void handleMainCheck(String service, String subservice, String filename) {
    log.debug("Reached handleMainCheck");
    Netfiles bul = (Netfiles) Vwms.getMMBase().getMMObject("netfiles");
    Enumeration e =
        bul.search(
            "WHERE filename='"
                + filename
                + "' AND service='"
                + service
                + "' AND subservice='"
                + subservice
                + "'");
    if (e.hasMoreElements()) {
      MMObjectNode mainnode = (MMObjectNode) e.nextElement();
      mainnode.setValue("status", Netfiles.STATUS_REQUEST);
      mainnode.commit();
    } else {
      MMObjectNode mainnode = bul.getNewNode("system");
      mainnode.setValue("filename", filename);
      mainnode.setValue("mmserver", Vwms.getMMBase().getMachineName());
      mainnode.setValue("service", service);
      mainnode.setValue("subservice", subservice);
      mainnode.setValue("status", Netfiles.STATUS_REQUEST);
      mainnode.setValue("filesize", -1);
      bul.insert("system", mainnode);

      Enumeration f = getMirrorNodes(service).elements();
      while (f.hasMoreElements()) {
        MMObjectNode n2 = (MMObjectNode) f.nextElement();
        // hack hack also have to create mirror nodes !
        mainnode = bul.getNewNode("system");
        mainnode.setValue("filename", filename);
        mainnode.setValue("mmserver", n2.getStringValue("name"));
        mainnode.setValue("service", service);
        mainnode.setValue("subservice", "mirror");
        mainnode.setValue("status", Netfiles.STATUS_DONE);
        mainnode.setValue("filesize", -1);
        bul.insert("system", mainnode);
      }
    }
  }

  /**
   * Retrieves a named property of a server.
   *
   * @param machine name of the server
   * @param key name of the property to retrieve
   * @return the property value
   */
  public String getProperty(String machine, String key) {
    MMServers mmservers = (MMServers) Vwms.getMMBase().getMMObject("mmservers");
    return mmservers.getMMServerProperty(machine, key);
  }

  /**
   * Recalculates a page. Invokes the SCAN parser (which will re-cache the page through the
   * scancache module). Only works for SCAN.
   *
   * @param url the URL of the page to cache
   */
  public void calcPage(String url) {
    scanparser m = (scanparser) Vwms.getMMBase().getModule("SCANPARSER");
    url = url.substring(0, url.length() - 5);
    url = url.replace(':', '?');
    log.debug("getPage=" + url);
    if (m != null) {
      scanpage sp = new scanpage();
      m.calcPage(url, sp, 0);
    }
  }

  /**
   * Retrieves a list of Mirror Servers. This is done by obtaining a fileserver node and retrieving
   * associated mmserver nodes. This method should be renamed and moved to the netfilesrv builder.
   *
   * @param service presumably the service to query for. Unused.
   * @return a <code>Vector</code> containing mmserver nodes that act as mirror server for this
   *     service
   */
  public Vector getMirrorNodes(String service) {
    if (mirrornodes != null) return mirrornodes;
    NetFileSrv bul = (NetFileSrv) Vwms.getMMBase().getMMObject("netfilesrv");
    if (bul != null) {
      Enumeration e = bul.search("service=='pages'+subservice=='mirror'");
      if (e.hasMoreElements()) {
        MMObjectNode n1 = (MMObjectNode) e.nextElement();
        mirrornodes = n1.getRelatedNodes("mmservers");
        if (mirrornodes != null) return mirrornodes;
      }
    }
    mirrornodes = new Vector();
    return mirrornodes;
  }
}
Example #5
/**
 * A cache implementation backed by a {@link java.util.LinkedHashMap}, in access-order mode, and
 * restricted maximal size ('Least Recently Used' cache algorithm).
 *
 * @author Michiel Meeuwissen
 * @version $Id$
 * @see org.mmbase.cache.Cache
 * @since MMBase-1.8.6
 */
public class LRUCache<K, V> implements CacheImplementationInterface<K, V> {

  private static final Logger log = Logging.getLoggerInstance(LRUCache.class);

  private int maxSize;
  private final Map<K, V> backing;

  public LRUCache() {
    this(100);
  }

  public LRUCache(int size) {
    maxSize = size;
    // caches can typically be accessed/modified by multiple threads, so we need to synchronize
    backing =
        Collections.synchronizedMap(
            new LinkedHashMap<K, V>(size, 0.75f, true) {
              private static final long serialVersionUID = 0L;

              @Override
              protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                int overSized = size() - LRUCache.this.maxSize;
                if (overSized <= 0) {
                  return false;
                } else if (overSized == 1) {
                  // Use the iterator to manually remove the eldest entry, rather than returning
                  // true, to make absolutely sure that one disappears, because that seems to fail
                  // sometimes for QueryResultCache.

                  final Iterator<K> i = keySet().iterator();
                  K actualEldest = i.next();
                  i.remove();
                  overSized = size() - LRUCache.this.maxSize;
                  while (overSized > 0) {
                    // if for some reason a key changed in the cache, even one i.remove() may not
                    // shrink the cache.
                    log.warn(
                        "cache didn't shrink (a)"
                            + eldest.getKey()
                            + " ["
                            + eldest.getKey().getClass()
                            + "] ["
                            + eldest.getKey().hashCode()
                            + "]");
                    log.warn(
                        "cache didn't shrink (b)"
                            + actualEldest
                            + " ["
                            + actualEldest.getClass()
                            + "] ["
                            + actualEldest.hashCode()
                            + "]");
                    actualEldest = i.next();
                    i.remove();
                    overSized = size() - LRUCache.this.maxSize;
                  }
                  assert overSized <= 0;
                  return false;
                } else {
                  log.warn("How is this possible? Oversized: " + overSized);
                  log.debug("because", new Exception());
                  if (overSized > 10) {
                    log.error(
                        "For some reason this cache grew much too big ("
                            + size()
                            + " >> "
                            + LRUCache.this.maxSize
                            + "). This must be some kind of bug. Resizing now.");
                    clear();
                  }
                  return false;
                }
              }
            });
  }

  @Override
  public int getCount(K key) {
    return -1;
  }

  /**
   * Change the maximum size of the table. This may result in removal of entries in the table.
   *
   * @param size the new desired size
   */
  @Override
  public void setMaxSize(int size) {
    if (size < 0) {
      throw new IllegalArgumentException("Cannot set size to negative value " + size);
    }
    maxSize = size;
    synchronized (backing) {
      while (size() > maxSize) {
        try {
          Iterator<K> i = keySet().iterator();
          i.next();
          i.remove();
        } catch (Exception e) {
          log.warn(e);
          // ConcurrentModificationException?
        }
      }
    }
  }

  @Override
  public final int maxSize() {
    return maxSize;
  }

  /** Returns size, maxSize. */
  @Override
  public String toString() {
    return "Size=" + size() + ", Max=" + maxSize;
  }

  @Override
  public void config(Map<String, String> map) {
    // needs no configuration.
  }

  @Override
  public Object getLock() {
    return backing;
  }

  // wrapping for synchronization
  @Override
  public int size() {
    return backing.size();
  }

  @Override
  public boolean isEmpty() {
    return backing.isEmpty();
  }

  @Override
  @SuppressWarnings("element-type-mismatch")
  public boolean containsKey(Object key) {
    return backing.containsKey(key);
  }

  @Override
  @SuppressWarnings("element-type-mismatch")
  public boolean containsValue(Object value) {
    return backing.containsValue(value);
  }

  @Override
  @SuppressWarnings("element-type-mismatch")
  public V get(Object key) {
    return backing.get(key);
  }

  @Override
  public V put(K key, V value) {
    return backing.put(key, value);
  }

  @Override
  @SuppressWarnings("element-type-mismatch")
  public V remove(Object key) {
    return backing.remove(key);
  }

  @Override
  public void putAll(Map<? extends K, ? extends V> map) {
    backing.putAll(map);
  }

  @Override
  public void clear() {
    backing.clear();
  }

  @Override
  public Set<K> keySet() {
    return backing.keySet();
  }

  @Override
  public Set<Map.Entry<K, V>> entrySet() {
    return backing.entrySet();
  }

  @Override
  public Collection<V> values() {
    return backing.values();
  }
}
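
A minimal usage sketch of the cache above; the key/value types and sizes are arbitrary examples, the demo class is hypothetical.

public class LRUCacheDemo {
  public static void main(String[] args) {
    LRUCache<String, Integer> cache = new LRUCache<String, Integer>(2);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.get("a");                             // touch "a", so "b" becomes the eldest entry
    cache.put("c", 3);                          // exceeds maxSize: least recently used ("b") is evicted
    System.out.println(cache.containsKey("b")); // false
    System.out.println(cache);                  // Size=2, Max=2
    cache.setMaxSize(1);                        // shrinks the cache, evicting once more
  }
}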
Example #6
/**
 * An index definition backed by plain JDBC queries, for when you also need to index data that
 * lives next to MMBase.
 *
 * @author Michiel Meeuwissen
 * @version $Id$
 */
public class JdbcIndexDefinition implements IndexDefinition {

  private static final Logger log = Logging.getLoggerInstance(JdbcIndexDefinition.class);

  private static int directConnections = 0;

  private static final int CACHE_SIZE = 10 * 1024;
  protected static Cache<String, LazyMap> nodeCache =
      new Cache<String, LazyMap>(CACHE_SIZE) {
        {
          putCache();
        }

        @Override
        public final String getName() {
          return "LuceneJdbcNodes";
        }

        @Override
        public final String getDescription() {
          return "Node identifier -> Map";
        }
      };

  private final DataSource dataSource;
  private final String key;
  private final String identifier;
  private final String indexSql;
  private final String findSql;
  private final Analyzer analyzer;

  private final Set<String> keyWords = new HashSet<String>();
  private final Map<String, Indexer.Multiple> nonDefaultMultiples =
      new HashMap<String, Indexer.Multiple>();
  private final Map<String, Float> boosts = new HashMap<String, Float>();

  private final Collection<IndexDefinition> subQueries = new ArrayList<IndexDefinition>();

  private final boolean isSub;

  private String id;

  JdbcIndexDefinition(
      DataSource ds,
      Element element,
      Set allIndexedFields,
      boolean storeText,
      boolean mergeText,
      Analyzer a,
      boolean isSub) {
    this.dataSource = ds;
    indexSql = element.getAttribute("sql");
    key = element.getAttribute("key");
    String elementId = element.getAttribute("identifier");
    identifier = "".equals(elementId) ? key : elementId;
    findSql = element.getAttribute("find");
    NodeList childNodes = element.getChildNodes();
    for (int k = 0; k < childNodes.getLength(); k++) {
      if (childNodes.item(k) instanceof Element) {
        Element childElement = (Element) childNodes.item(k);
        if ("field".equals(childElement.getLocalName())) {
          if (childElement.getAttribute("keyword").equals("true")) {
            keyWords.add(childElement.getAttribute("name"));
          }
          String m = childElement.getAttribute("multiple");
          if ("".equals(m)) m = "add";
          if (!m.equals("add")) {
            nonDefaultMultiples.put(
                childElement.getAttribute("name"), Indexer.Multiple.valueOf(m.toUpperCase()));
          }
          String b = childElement.getAttribute("boost");
          if (!b.equals("")) {
            boosts.put(childElement.getAttribute("name"), Float.valueOf(b));
          }
        } else if ("related".equals(childElement.getLocalName())) {
          subQueries.add(
              new JdbcIndexDefinition(
                  ds, childElement, allIndexedFields, storeText, mergeText, a, true));
        }
      }
    }
    this.analyzer = a;
    this.isSub = isSub;
    assert !isSub || "".equals(findSql);
  }

  @Override
  public void setId(String i) {
    id = i;
  }

  @Override
  public String getId() {
    return id;
  }

  /**
   * MMBase's JDBC connection pooling would kill the statement if it takes too long. This produces
   * a 'direct connection' in that case, to circumvent that problem (indexing queries _may_ take a
   * while).
   */
  protected Connection getDirectConnection() throws SQLException {
    directConnections++;
    try {
      if (dataSource instanceof GenericDataSource) {
        return ((GenericDataSource) dataSource).getDirectConnection();
      } else {
        return dataSource.getConnection();
      }
    } catch (SQLException sqe) {
      log.error("With direct connection #" + directConnections + ": " + sqe.getMessage());
      throw sqe;
    } catch (Throwable t) {
      throw new RuntimeException("direct connection #" + directConnections, t);
    }
  }

  @Override
  public Analyzer getAnalyzer() {
    return analyzer;
  }

  protected String getFindSql(String identifier) {
    assert !isSub;
    if (findSql == null || "".equals(findSql)) throw new RuntimeException("No find query defined");
    if (identifier == null) throw new RuntimeException("No find query defined");
    String s = findSql.replaceAll("\\[IDENTIFIER\\]", identifier);
    s = s.replaceAll("\\[KEY\\]", identifier); // deprecated
    return s;
  }

  @Override
  public boolean inIndex(String identifier) {
    CloseableIterator<JdbcEntry> i = getSqlCursor(getFindSql(identifier));
    boolean result = i.hasNext();
    try {
      i.close();
    } catch (IOException ex) {
      log.warn(ex);
    }
    return result;
  }

  protected String getSql(String identifier) {
    if (indexSql == null || "".equals(indexSql)) throw new RuntimeException("No sql defined");
    if (identifier == null) throw new RuntimeException("No query defined");
    String s = indexSql.replaceAll("\\[PARENTKEY\\]", identifier);
    s = s.replaceAll("\\[KEY\\]", identifier); // deprecated
    return s;
  }

  CloseableIterator<JdbcEntry> getSqlCursor(final String sql) {
    try {
      long start = System.currentTimeMillis();
      final Connection con = getDirectConnection();
      log.debug("About to execute " + sql + " (" + directConnections + ")");
      final Statement statement = con.createStatement();
      final ResultSet results = statement.executeQuery(sql);
      if (log.isDebugEnabled()) {
        log.debug("Executed " + sql + " in " + (System.currentTimeMillis() - start) + " ms");
      }
      final ResultSetMetaData meta = results.getMetaData();

      return new CloseableIterator<JdbcEntry>() {
        boolean hasNext = results.isBeforeFirst();
        int i = 0;

        @Override
        public boolean hasNext() {
          return hasNext;
        }

        @Override
        public JdbcEntry next() {
          if (!hasNext) {
            throw new NoSuchElementException();
          }
          try {
            results.next();
            hasNext = !results.isLast();
          } catch (java.sql.SQLException sqe) {
            log.error(sqe);
            hasNext = false;
          }
          JdbcEntry entry = new JdbcEntry(meta, results, sql);
          i++;
          if (log.isServiceEnabled()) {
            if (i % 100 == 0) {
              log.service("jdbc cursor " + i + " (now at id=" + entry.getIdentifier() + ")");
            } else if (log.isDebugEnabled()) {
              log.trace("jdbc cursor " + i + " (now at id=" + entry.getIdentifier() + ")");
            }
          }
          return entry;
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }

        @Override
        public void close() {
          log.debug("Closing " + con);
          try {
            if (results != null) results.close();
            if (statement != null) statement.close();
            if (con != null) con.close();
          } catch (Exception e) {
            log.error(e);
          }
        }
      };
    } catch (Exception e) {
      throw new RuntimeException(e.getMessage(), e);
    }
  }

  /**
   * A map representing a row in a database, but only filled when actually used. The query is only
   * done on first use, and not before that.
   *
   * @since MMBase-1.9
   */
  protected class LazyMap extends AbstractMap<String, String> {
    private Map<String, String> map = null;
    private final Map<String, String> keys;
    private final String identifier;

    LazyMap(String identifier, Map<String, String> keys) {
      this.identifier = identifier;
      this.keys = keys;
    }

    protected void check() {
      if (map == null) {
        Connection connection = null;
        Statement statement = null;
        ResultSet results = null;
        try {
          connection = dataSource.getConnection();
          statement = connection.createStatement();
          long start = System.currentTimeMillis();
          String s = getFindSql(identifier);
          if (log.isTraceEnabled()) {
            log.trace("About to execute " + s + " because ", new Exception());
          }
          results = statement.executeQuery(s);
          ResultSetMetaData meta = results.getMetaData();
          map = new HashMap<String, String>();
          if (results.next()) {
            for (int i = 1; i <= meta.getColumnCount(); i++) {
              String value = org.mmbase.util.Casting.toString(results.getString(i));
              map.put(meta.getColumnName(i).toLowerCase(), value);
            }
          }
          long duration = (System.currentTimeMillis() - start);
          if (duration > 500) {
            log.warn("Executed " + s + " in " + duration + " ms");
          } else if (duration > 100) {
            log.debug("Executed " + s + " in " + duration + " ms");
          } else {
            log.trace("Executed " + s + " in " + duration + " ms");
          }
        } catch (Exception e) {
          throw new RuntimeException(e.getMessage(), e);
        } finally {
          if (results != null)
            try {
              results.close();
            } catch (Exception e) {
            }
          if (statement != null)
            try {
              statement.close();
            } catch (Exception e) {
            }
          if (connection != null)
            try {
              connection.close();
            } catch (Exception e) {
            }
        }
      }
    }

    @Override
    public Set<Map.Entry<String, String>> entrySet() {
      check();
      return map.entrySet();
    }

    @Override
    public int size() {
      check();
      return map.size();
    }

    @Override
    public String get(Object key) {
      if (JdbcIndexDefinition.this.identifier.equals(key)) return identifier;
      if (keys.containsKey(key)) return keys.get(key);
      check();
      return map.get(key);
    }

    @Override
    public boolean containsKey(Object key) {
      if (JdbcIndexDefinition.this.identifier.equals(key)) return true;
      if (keys.containsKey(key)) return true;
      check();
      return map.containsKey(key);
    }

    @Override
    public String toString() {
      if (map != null) {
        return map.toString();
      } else {
        return "[LAZY node " + identifier + "]";
      }
    }
  }

  @Override
  public org.mmbase.bridge.Node getNode(final Cloud userCloud, final Document doc) {
    String docId = doc.get("number");
    if (docId == null) {
      throw new IllegalArgumentException("No number found in " + doc);
    }
    LazyMap m = nodeCache.get(docId);
    if (m == null) {
      Map<String, String> keys = new HashMap<String, String>();
      for (String keyWord : keyWords) {
        keys.put(keyWord, doc.get(keyWord));
      }
      m = new LazyMap(docId, keys);
      nodeCache.put(docId, m);
    }
    org.mmbase.bridge.Node node =
        new MapNode<String>(
            m,
            new MapNodeManager(userCloud, m) {
              @Override
              public boolean hasField(String name) {
                if (JdbcIndexDefinition.this.key.equals(name)) return true;
                return super.hasField(name);
              }

              @Override
              public org.mmbase.bridge.Field getField(String name) {
                if (map == null && JdbcIndexDefinition.this.key.equals(name)) {
                  org.mmbase.core.CoreField fd =
                      org.mmbase.core.util.Fields.createField(
                          name,
                          org.mmbase.core.util.Fields.classToType(Object.class),
                          org.mmbase.bridge.Field.TYPE_UNKNOWN,
                          org.mmbase.bridge.Field.STATE_VIRTUAL,
                          null);
                  return new org.mmbase.bridge.implementation.BasicField(fd, this);
                } else {
                  return super.getField(name);
                }
              }
            });
    if (log.isDebugEnabled()) {
      log.debug("Returning node for " + node);
    }
    return node;
  }

  @Override
  public CloseableIterator<JdbcEntry> getCursor() {
    assert !isSub;
    return getSqlCursor(indexSql);
  }

  @Override
  public CloseableIterator<JdbcEntry> getSubCursor(String identifier) {
    if (isSub) {
      log.debug("Using getSubCursor for " + identifier);
      return getSqlCursor(getSql(identifier));
    } else {
      return getSqlCursor(getFindSql(identifier));
    }
  }

  @Override
  public String toString() {
    return indexSql;
  }

  public class JdbcEntry implements IndexEntry {
    final ResultSetMetaData meta;
    final ResultSet results;
    final String sql;

    JdbcEntry(ResultSetMetaData m, ResultSet r, String s) {
      log.trace("new JDBC Entry");
      meta = m;
      results = r;
      sql = s;
    }

    @Override
    public void index(Document document) {
      if (log.isTraceEnabled()) {
        log.trace(
            "Indexing "
                + sql
                + " id="
                + JdbcIndexDefinition.this.identifier
                + ", key = "
                + JdbcIndexDefinition.this.key);
      }
      String id = getIdentifier();
      if (id != null) {
        document.add(
            new Field(
                "builder",
                "VIRTUAL BUILDER",
                Field.Store.YES,
                Field.Index.NOT_ANALYZED)); // keyword
        document.add(
            new Field(
                "number", getIdentifier(), Field.Store.YES, Field.Index.NOT_ANALYZED)); // keyword
      }
      try {
        for (int i = 1; i <= meta.getColumnCount(); i++) {
          String value = org.mmbase.util.Casting.toString(results.getString(i));
          if (log.isTraceEnabled()) {
            log.trace(
                "Indexing " + value + " for " + meta.getColumnName(i) + " on " + getIdentifier());
          }
          String fieldName = meta.getColumnName(i);
          if (keyWords.contains(fieldName)) {
            Indexer.addField(
                document,
                new Field(fieldName, value, Field.Store.YES, Field.Index.NOT_ANALYZED),
                nonDefaultMultiples.get(fieldName)); // keyword
          } else {
            Field field = new Field(fieldName, value, Field.Store.YES, Field.Index.ANALYZED);
            Float boost = boosts.get(fieldName);
            if (boost != null) {
              field.setBoost(boost);
            }
            Indexer.addField(document, field, nonDefaultMultiples.get(fieldName));
            Field fullText = new Field("fulltext", value, Field.Store.YES, Field.Index.ANALYZED);
            if (boost != null) {
              fullText.setBoost(boost);
            }
            document.add(fullText);
          }
        }
      } catch (SQLException sqe) {
        log.error(sqe.getMessage(), sqe);
      }
    }

    @Override
    public Collection<IndexDefinition> getSubDefinitions() {
      return JdbcIndexDefinition.this.subQueries;
    }

    @Override
    public String getIdentifier() {
      if (JdbcIndexDefinition.this.identifier != null
          && !JdbcIndexDefinition.this.identifier.equals("")) {
        try {
          return results.getString(JdbcIndexDefinition.this.identifier);
        } catch (SQLException sqe) {
          log.error(meta + " " + sqe.getMessage(), sqe);
          return "";
        }
      } else {
        return null;
      }
    }

    @Override
    public String getKey() {
      if (JdbcIndexDefinition.this.key != null && !JdbcIndexDefinition.this.key.equals("")) {
        try {
          return results.getString(JdbcIndexDefinition.this.key);
        } catch (SQLException sqe) {
          log.error(sqe.getMessage(), sqe);
          return "";
        }
      } else {
        return null;
      }
    }

    public Set<String> getIdentifiers() {
      Set<String> ids = new HashSet<String>();
      ids.add(getIdentifier());
      return ids;
    }
  }
}
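
The constructor above is driven by an XML element. A sketch of what such a configuration fragment could look like, written here as a Java string; only the attribute names (sql, key, identifier, find) and the <field>/<related> child elements with their keyword, multiple and boost attributes are taken from the constructor, while the element, table and column names are made up.

public class JdbcIndexConfigSketch {
  // Hypothetical fragment. [IDENTIFIER] and [PARENTKEY] are the placeholders replaced in
  // getFindSql/getSql above; a <related> element must not carry a 'find' attribute (see the
  // assert in the constructor).
  static final String CONFIG =
      "<table sql=\"SELECT id, title, body FROM articles\"\n"
          + "       key=\"id\"\n"
          + "       find=\"SELECT id, title, body FROM articles WHERE id = '[IDENTIFIER]'\">\n"
          + "  <field name=\"id\" keyword=\"true\" />\n"
          + "  <field name=\"title\" boost=\"2.0\" />\n"
          + "  <related sql=\"SELECT tag FROM tags WHERE article = '[PARENTKEY]'\" key=\"tag\" />\n"
          + "</table>";
}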
Example #7
/**
 * Generic tool to execute some SQL
 *
 * @since MMBase-1.9.5
 * @author Michiel Meeuwissen
 * @version $Id$
 */
public class SqlExecutor implements Runnable {

  private static final Logger LOG = Logging.getLoggerInstance(SqlExecutor.class);

  private String query;
  private String update;
  private String onlyIfQuery;
  private Pattern ignore = Pattern.compile("");

  protected DataSource dataSource;
  protected String prefix;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public void setPrefix(String pref) {
    this.prefix = pref;
  }

  public DataSource getDataSource() {
    return dataSource;
  }

  public String getPrefix() {
    if (prefix == null) {
      return "$PREFIX";
    } else {
      return prefix;
    }
  }

  public void setIgnoreException(String e) {
    ignore = Pattern.compile(e);
  }

  public void setQuery(String q) {
    if (update != null) throw new IllegalStateException();
    query = q;
  }

  public void setUpdate(String u) {
    if (query != null) throw new IllegalStateException();
    update = u;
  }

  /**
   * A query returning either true or false. E.g. <param name="onlyIf"><![CDATA[select 1 = (select
   * count(*) from mm_versions where m_type='application' and name='Limburg' and m_version <
   * 7);]]></param>
   */
  public void setOnlyIf(String q) {
    onlyIfQuery = q;
  }

  protected void executeQuery(Statement stmt, String q) throws SQLException {
    q = q.replace("$PREFIX", getPrefix());
    LOG.info(" Executing " + q);
    ResultSet rs = stmt.executeQuery(q);
    StringBuilder header = new StringBuilder();
    for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
      if (i > 1) {
        header.append("|");
      }
      header.append(rs.getMetaData().getColumnName(i));
    }
    LOG.info(header);
    int seq = 0;
    while (true) {
      boolean valid = rs.next();
      if (!valid) break;
      seq++;
      StringBuilder line = new StringBuilder();
      for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
        if (i > 1) {
          line.append("|");
        }
        line.append(rs.getString(i));
      }
      LOG.info(seq + ":" + line);
    }
  }

  protected void executeUpdate(Statement stmt, String u) throws SQLException {
    u = u.replace("$PREFIX", getPrefix());
    LOG.info(" Executing update " + u);
    int result = stmt.executeUpdate(u);
    LOG.service("Result :" + result);
  }

  protected boolean executeOnlyIf(Connection con, String q) throws SQLException {
    if (q == null) return true;
    Statement stmt = null;
    try {
      stmt = con.createStatement();
      q = q.replace("$PREFIX", getPrefix());
      LOG.debug(" Executing query " + q);
      ResultSet rs = stmt.executeQuery(q);
      rs.next();
      boolean res = rs.getBoolean(1);
      LOG.debug("Result: " + res);
      return res;
    } catch (SQLException sqe) {
      LOG.error(sqe.getMessage() + " from " + q);
      throw sqe;
    } finally {
      try {
        if (stmt != null) {
          stmt.close();
        }
      } catch (Exception g) {
      }
    }
  }

  @Override
  public void run() {
    Connection con = null;
    Statement stmt = null;
    try {
      DataSource ds = getDataSource();
      con = ds.getConnection();
      if (executeOnlyIf(con, onlyIfQuery)) {
        stmt = con.createStatement();
        if (query != null) {
          executeQuery(stmt, query);
        } else if (update != null) {
          executeUpdate(stmt, update);
        } else {
          throw new IllegalStateException("Both query and update properties are unset");
        }
      } else {
        LOG.debug("Skipped because of " + onlyIfQuery);
      }
    } catch (RuntimeException e) {
      throw e;
    } catch (Throwable t) {
      if (ignore.matcher(t.getMessage()).matches()) {
        LOG.info("Ignoring " + t.getMessage());
      } else {
        throw new RuntimeException(t.getMessage(), t);
      }
    } finally {
      try {
        if (stmt != null) {
          stmt.close();
        }
      } catch (Exception g) {
      }
      try {
        if (con != null) {
          con.close();
        }
      } catch (Exception g) {
      }
    }
  }

  @Override
  public String toString() {
    if (update != null) {
      return update;
    } else if (query != null) {
      return query;
    } else {
      return "No query yet";
    }
  }
}
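
A minimal usage sketch of the executor above, assuming a DataSource obtained elsewhere; the guard query, table and column names loosely follow the mm_versions example in the setOnlyIf javadoc, but the demo class itself is hypothetical.

import javax.sql.DataSource;

public class SqlExecutorDemo {
  public static void execute(DataSource dataSource) {
    SqlExecutor executor = new SqlExecutor();
    executor.setDataSource(dataSource);
    executor.setPrefix("mm_");                         // substituted for $PREFIX in the statements
    // only run the update when no 'demo' row exists yet (hypothetical guard)
    executor.setOnlyIf("SELECT count(*) = 0 FROM $PREFIXversions WHERE name = 'demo'");
    executor.setUpdate("INSERT INTO $PREFIXversions (name, m_version) VALUES ('demo', 1)");
    executor.setIgnoreException(".*already exists.*"); // swallow a known, harmless failure
    executor.run();
  }
}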
Example #8
/**
 * @javadoc
 * @deprecated is this (cacheversionfile) used? seems obsolete now
 * @author Daniel Ockeloen
 * @version $Id$
 */
class VersionCacheNode {

  private static Logger log = Logging.getLoggerInstance(VersionCacheNode.class.getName());
  private MMObjectNode versionnode;
  private List<VersionCacheWhenNode> whens = new Vector<VersionCacheWhenNode>();
  private MMBase mmb;

  public VersionCacheNode(MMBase mmb) {
    this.mmb = mmb;
  }
  /** @javadoc */
  public void handleChanged(String buildername, int number) {
    // checks whether this is really something valid
    // and whether we should signal a new version

    boolean dirty = false;
    for (VersionCacheWhenNode whennode : whens) {
      List<String> types = whennode.getTypes();

      // check if I'm known in the types part
      if (types.contains(buildername)) {
        // is there only one builder type?
        if (log.isDebugEnabled()) log.debug("types=" + types.toString());
        if (types.size() == 1) {
          dirty = true;
        } else {
          // multiple types, so prepare a multilevel query
          List<String> nodes = whennode.getNodes();

          List<String> fields = new Vector<String>();
          fields.add(buildername + ".number");
          List<String> ordervec = new Vector<String>();
          List<String> dirvec = new Vector<String>();

          List<MMObjectNode> vec =
              mmb.getClusterBuilder()
                  .searchMultiLevelVector(
                      nodes,
                      fields,
                      "YES",
                      types,
                      buildername + ".number==" + number,
                      ordervec,
                      dirvec);
          if (log.isDebugEnabled()) log.debug("VEC=" + vec);
          if (vec != null && vec.size() > 0) {
            dirty = true;
          }
        }
      }
    }

    if (dirty) {
      // add one to the version of this counter
      int version = versionnode.getIntValue("version");
      versionnode.setValue("version", version + 1);
      versionnode.commit();
      if (log.isDebugEnabled()) log.debug("Changed = " + (version + 1));
    }
  }

  public void setVersionNode(MMObjectNode versionnode) {
    this.versionnode = versionnode;
  }

  /** @javadoc */
  public void addWhen(VersionCacheWhenNode when) {
    whens.add(when);
  }
}