Example #1
@Service
public abstract class FantasyGame {
  private static final Log LOG = LogFactory.getLog(FantasyGame.class);

  protected FantasyTeam fantasyTeam;

  public FantasyGame() {}

  public void tradeForTomorrow() throws Exception {
    tradeForDate(DateUtil.getGameTomorrow());
  }

  public void tradeForDate(Date date) throws Exception {
    BbcLeague bbcLeague = getLeague(date);

    Starters expectedStarters = fantasyTeam.getStrategy().pickStarters(date, bbcLeague);
    Starters actualStarters = tradeForStarters(expectedStarters);

    boolean startersSet = actualStarters.equals(expectedStarters);
    System.out.println("starters look good - " + startersSet);
  }

  public abstract BbcLeague getLeague(Date date) throws IOException;

  public abstract Starters tradeForStarters(Starters starters) throws IOException;
}
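A minimal sketch of a concrete subclass follows; BbcLeagueClient is a hypothetical helper for talking to the league site (it is not part of the original code) and only illustrates how the two abstract methods might be satisfied.

// Hypothetical sketch; BbcLeagueClient and its methods are assumed, not from the original code.
@Service
public class HttpFantasyGame extends FantasyGame {

  private final BbcLeagueClient leagueClient;

  public HttpFantasyGame(BbcLeagueClient leagueClient, FantasyTeam fantasyTeam) {
    this.leagueClient = leagueClient;
    this.fantasyTeam = fantasyTeam;
  }

  @Override
  public BbcLeague getLeague(Date date) throws IOException {
    // Assumed call: fetch the league snapshot for the given game date.
    return leagueClient.fetchLeague(date);
  }

  @Override
  public Starters tradeForStarters(Starters starters) throws IOException {
    // Assumed call: submit the desired starters and return what the site actually accepted.
    return leagueClient.submitTrades(starters);
  }
}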
Example #2
/**
 * MDC server
 *
 * @author yjiang
 */
public abstract class MDCServer extends IoHandlerAdapter {

  static final Log log = LogFactory.getLog(MDCServer.class);

  static final AtomicInteger counter = new AtomicInteger(0);

  /** the max size of a packet, default 10 MB (overridable via "mdc.max_size") */
  static int MAX_SIZE = 10240 * 1024;

  protected InetSocketAddress address;
  protected Selector selector;
  protected ServerSocketChannel server;
  protected final int PROCESS_NUMBER = 4;
  protected static Configuration _conf;

  protected IoAcceptor acceptor;
  protected boolean isRunning = false;

  protected boolean testKey() {
    String data = UID.random(24);
    byte[] bb = RSA.encode(data.getBytes(), TConn.pub_key);
    if (bb != null) {
      bb = RSA.decode(bb, TConn.pri_key);
      if (bb != null && data.equals(new String(bb))) {
        return true;
      }
    }

    return false;
  }

  /** Close. */
  public void close() {
    if (selector != null) {
      selector.wakeup();
      try {
        selector.close();
      } catch (IOException e1) {
        log.warn("close selector fails", e1);
      } finally {
        selector = null;
      }
    }

    if (server != null) {
      try {
        server.socket().close();
        server.close();
      } catch (IOException e) {
        log.warn("close socket server fails", e);
      } finally {
        server = null;
      }
    }
  }

  /**
   * Instantiates a new MDC server.
   *
   * @param host the host
   * @param port the port
   */
  protected MDCServer(String host, int port) {
    _conf = Config.getConfig();

    address = (host == null) ? new InetSocketAddress(port) : new InetSocketAddress(host, port);

    /** initialize app command */
    Command.init();

    /** initialize the connection center */
    TConnCenter.init(_conf, port);

    synchronized (_conf) {
      /** load public key from database */
      TConn.pub_key = SystemConfig.s("pub_key", null);
      TConn.pri_key = SystemConfig.s("pri_key", null);

      /** initialize the RSA key, hardcode 2048 bits */
      if (TConn.pub_key == null
          || TConn.pri_key == null
          || "".equals(TConn.pub_key)
          || "".equals(TConn.pri_key)) {
        /** print out the old state */
        log.warn(
            "the pub_key or pri_key missed, the old state are pub_key:["
                + TConn.pub_key
                + "], pri_key:["
                + TConn.pri_key
                + "]");

        Key k = RSA.generate(2048);
        TConn.pri_key = k.pri_key;
        TConn.pub_key = k.pub_key;

        /** print out the new public key */
        log.warn("create new RSA key pair, pub_key:[" + TConn.pub_key + ']');

        /** set back in database */
        SystemConfig.setConfig("pri_key", TConn.pri_key);
        SystemConfig.setConfig("pub_key", TConn.pub_key);
      }

      MAX_SIZE = SystemConfig.i("mdc.max_size", MAX_SIZE);
    }
  }

  /**
   * Start.
   *
   * @return the MDC server
   */
  public abstract MDCServer start();

  /** Stop. */
  public void stop() {
    acceptor.unbind();
  }

  /**
   * Service.
   *
   * @param o the o
   * @param session the session
   */
  void service(IoBuffer o, IoSession session) {
    try {
      // System.out.println(o.remaining() + "/" + o.capacity());

      session.setAttribute("last", System.currentTimeMillis());

      SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf");
      if (in == null) {
        in = SimpleIoBuffer.create(4096);
        session.setAttribute("buf", in);
      }
      byte[] data = new byte[o.remaining()];
      o.get(data);
      in.append(data);

      // log.debug("recv: " + data.length + ", " +
      // session.getRemoteAddress());

      while (in.length() > 5) {
        in.mark();
        /**
         * Byte 1: head of the package<br>
         * bit 7-6: "01", indicator of MDC<br>
         * bit 5: encrypt indicator, "0": no; "1": encrypted<br>
         * bit 4: zip indicator, "0": no, "1": zipped<br>
         * bit 0-3: reserved<br>
         * Byte 2-5: length of data<br>
         * Byte[…]: data array<br>
         * (a client-side framing sketch follows this class)
         */
        byte head = in.read();
        /** test the head indicator, if not correct close it */
        if ((head & 0xC0) != 0x40) {
          log.info("flag is not correct! flag:" + head + ",from: " + session.getRemoteAddress());

          session.write("error.head");
          session.close(true);
          return;
        }

        int len = in.getInt();

        if (len <= 0 || len > MAX_SIZE) {
          log.error(
              "mdcserver.Wrong lendth: "
                  + len
                  + "/"
                  + MAX_SIZE
                  + " - "
                  + session.getRemoteAddress());
          session.write("error.packet.size");
          session.close(true);
          break;
        }

        // log.info("packet.len:" + len + ", len in buffer:" +
        // in.length());
        if (in.length() < len) {
          in.reset();
          break;
        } else {
          // do it

          byte[] b = new byte[len];
          in.read(b);

          // log.info("stub.package.size: " + len + ", head:" + head +
          // ", cmd:" + Bean.toString(b));
          // log.info("stub.package.size: " + len + ", head:" + head);

          /** test the zip flag */
          if ((head & 0x10) != 0) {
            b = Zip.unzip(b);
          }

          final TConn d = (TConn) session.getAttribute("conn");
          if (d != null) {
            /** test the encrypted flag */
            if ((head & 0x20) != 0) {
              b = DES.decode(b, d.deskey);
            }

            final byte[] bb = b;

            /** test if the packet is for mdc or app */
            new WorkerTask() {

              @Override
              public void onExecute() {
                d.process(bb);
              }
            }.schedule(0);

            session.setAttribute("last", System.currentTimeMillis());
          } else {
            session.write("error.getconnection");

            log.error("error to get connection: " + session.getRemoteAddress());
            session.close(true);
          }
        }
      }
    } catch (Throwable e) {
      log.error("closing stub: " + session.getRemoteAddress(), e);
      session.write("exception." + e.getMessage());
      session.close(true);
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionCreated(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionCreated(IoSession session) throws Exception {
    log.info("stub created:" + session.getRemoteAddress());

    Counter.add("mdc", "connection", 1);

    TConn d = new TConn(session);
    d.set("x-forwarded-for", session.getRemoteAddress().toString());

    session.setAttribute("conn", d);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionClosed(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionClosed(IoSession session) throws Exception {
    log.info("closed stub: " + session.getRemoteAddress());
    TConn d = (TConn) session.getAttribute("conn");
    if (d != null) {
      d.close();

      session.removeAttribute("conn");
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache.
   * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus)
   */
  public void sessionIdle(IoSession session, IdleStatus status) throws Exception {
    if (IdleStatus.BOTH_IDLE.equals(status)) {
      Long l = (Long) session.getAttribute("last");
      if (l != null && System.currentTimeMillis() - l > 60 * 1000) {
        session.close(true);
      }
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#messageReceived(org.apache
   * .mina.core.session.IoSession, java.lang.Object)
   */
  public void messageReceived(IoSession session, Object message) throws Exception {
    // System.out.println(message);
    if (message instanceof IoBuffer) {
      service((IoBuffer) message, session);
    }
  }

  /**
   * Creates the tcp server.
   *
   * @param host the host
   * @param port the port
   * @return the MDC server
   */
  public static synchronized MDCServer createTcpServer(String host, int port) {
    return new TDCServer(host, port);
  }

  /**
   * Creates the udp server.
   *
   * @param host the host
   * @param port the port
   * @return the MDC server
   */
  public static synchronized MDCServer createUdpServer(String host, int port) {
    return new UDCServer(host, port);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#exceptionCaught(org.apache
   * .mina.core.session.IoSession, java.lang.Throwable)
   */
  @Override
  public void exceptionCaught(IoSession session, Throwable cause) throws Exception {
    TConn d = (TConn) session.getAttribute("conn");
    if (d != null && d.valid()) {
      App.bye(d);
    }
  }
}
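As a companion to the packet layout documented in service() above, here is a minimal client-side framing sketch. It assumes a plain payload (no encryption, no compression) and a big-endian 4-byte length, matching java.nio defaults; the class name is illustrative and not part of the original code.

// Hypothetical helper, not from the original code: frames a payload the way
// MDCServer.service() expects to read it (head byte + 4-byte length + data).
public final class MdcPacketFramer {

  public static byte[] frame(byte[] payload) {
    ByteBuffer buf = ByteBuffer.allocate(5 + payload.length);
    buf.put((byte) 0x40);       // head: bits 7-6 = "01" (MDC), encrypt/zip flags clear
    buf.putInt(payload.length); // bytes 2-5: length of data (big-endian, as assumed)
    buf.put(payload);           // the data array itself
    return buf.array();
  }
}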
/**
 * The <code>DefaultTcpTransportMapping</code> implements a TCP transport mapping with the Java 1.4
 * new IO API.
 *
 * <p>It uses a single thread for processing incoming and outgoing messages. The thread is started
 * when the <code>listen</code> method is called, or when an outgoing request is sent using the
 * <code>sendMessage</code> method.
 *
 * @author Frank Fock
 * @version 1.7.4a
 */
public class DefaultTcpTransportMapping extends TcpTransportMapping {

  private static final LogAdapter logger = LogFactory.getLogger(DefaultTcpTransportMapping.class);

  private Hashtable sockets = new Hashtable();
  private ServerThread server;

  private Timer socketCleaner;
  // 1 minute default timeout
  private long connectionTimeout = 60000;
  private boolean serverEnabled = false;

  private static final int MIN_SNMP_HEADER_LENGTH = 6;
  private MessageLengthDecoder messageLengthDecoder = new SnmpMesssageLengthDecoder();

  /**
   * Creates a default TCP transport mapping with the server for incoming messages disabled.
   *
   * @throws UnknownHostException
   * @throws IOException on failure of binding a local port.
   */
  public DefaultTcpTransportMapping() throws UnknownHostException, IOException {
    super(new TcpAddress(InetAddress.getLocalHost(), 0));
  }

  /**
   * Creates a default TCP transport mapping that binds to the given address (interface) on the
   * local host.
   *
   * @param serverAddress the TcpAddress instance that describes the server address to listen on
   *     incoming connection requests.
   * @throws UnknownHostException if the specified interface does not exist.
   * @throws IOException if the given address cannot be bound.
   */
  public DefaultTcpTransportMapping(TcpAddress serverAddress)
      throws UnknownHostException, IOException {
    super(serverAddress);
    this.serverEnabled = true;
  }

  /**
   * Listen for incoming and outgoing requests. If the <code>serverEnabled</code> member is <code>
   * false</code> the server for incoming requests is not started. This starts the internal server
   * thread that processes messages.
   *
   * @throws SocketException when the transport is already listening for incoming/outgoing messages.
   * @throws IOException
   */
  public synchronized void listen() throws java.io.IOException {
    if (server != null) {
      throw new SocketException("Port already listening");
    }
    server = new ServerThread();
    if (connectionTimeout > 0) {
      socketCleaner = new Timer(true); // run as daemon
    }
    server.setDaemon(true);
    server.start();
  }

  /**
   * Changes the priority of the server thread for this TCP transport mapping. This method has no
   * effect, if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param newPriority the new priority.
   * @see Thread#setPriority
   * @since 1.2.2
   */
  public void setPriority(int newPriority) {
    ServerThread st = server;
    if (st != null) {
      st.setPriority(newPriority);
    }
  }

  /**
   * Returns the priority of the internal listen thread.
   *
   * @return a value between {@link Thread#MIN_PRIORITY} and {@link Thread#MAX_PRIORITY}.
   * @since 1.2.2
   */
  public int getPriority() {
    ServerThread st = server;
    if (st != null) {
      return st.getPriority();
    } else {
      return Thread.NORM_PRIORITY;
    }
  }

  /**
   * Sets the name of the listen thread for this TCP transport mapping. This method has no effect,
   * if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param name the new thread name.
   * @since 1.6
   */
  public void setThreadName(String name) {
    ServerThread st = server;
    if (st != null) {
      st.setName(name);
    }
  }

  /**
   * Returns the name of the listen thread.
   *
   * @return the thread name if in listening mode, otherwise <code>null</code>.
   * @since 1.6
   */
  public String getThreadName() {
    ServerThread st = server;
    if (st != null) {
      return st.getName();
    } else {
      return null;
    }
  }

  /** Closes all open sockets and stops the internal server thread that processes messages. */
  public void close() {
    ServerThread st = server;
    if (st != null) {
      st.close();
      try {
        st.join();
      } catch (InterruptedException ex) {
        logger.warn(ex);
      }
      server = null;
      for (Iterator it = sockets.values().iterator(); it.hasNext(); ) {
        SocketEntry entry = (SocketEntry) it.next();
        try {
          synchronized (entry) {
            entry.getSocket().close();
          }
          logger.debug("Socket to " + entry.getPeerAddress() + " closed");
        } catch (IOException iox) {
          // ignore
          logger.debug(iox);
        }
      }
      if (socketCleaner != null) {
        socketCleaner.cancel();
      }
      socketCleaner = null;
    }
  }

  /**
   * Closes a connection to the supplied remote address, if it is open. This method is particularly
   * useful when not using a timeout for remote connections.
   *
   * @param remoteAddress the address of the peer socket.
   * @return <code>true</code> if the connection has been closed and <code>false</code> if there was
   *     nothing to close.
   * @since 1.7.1
   */
  public synchronized boolean close(Address remoteAddress) throws IOException {
    if (logger.isDebugEnabled()) {
      logger.debug("Closing socket for peer address " + remoteAddress);
    }
    SocketEntry entry = (SocketEntry) sockets.remove(remoteAddress);
    if (entry != null) {
      synchronized (entry) {
        entry.getSocket().close();
      }
      logger.info("Socket to " + entry.getPeerAddress() + " closed");
      return true;
    }
    return false;
  }

  /**
   * Sends a SNMP message to the supplied address.
   *
   * @param address a <code>TcpAddress</code>. A <code>ClassCastException</code> is thrown if
   *     <code>address</code> is not a <code>TcpAddress</code> instance.
   * @param message byte[] the message to send.
   * @throws IOException
   */
  public void sendMessage(Address address, byte[] message) throws java.io.IOException {
    if (server == null) {
      listen();
    }
    server.sendMessage(address, message);
  }

  /**
   * Gets the connection timeout. This timeout specifies the time a connection may be idle before it
   * is closed.
   *
   * @return long the idle timeout in milliseconds.
   */
  public long getConnectionTimeout() {
    return connectionTimeout;
  }

  /**
   * Sets the connection timeout. This timeout specifies the time a connection may be idle before it
   * is closed.
   *
   * @param connectionTimeout the idle timeout in milliseconds. A zero or negative value will
   *     disable any timeout and connections opened by this transport mapping will stay opened until
   *     they are explicitly closed.
   */
  public void setConnectionTimeout(long connectionTimeout) {
    this.connectionTimeout = connectionTimeout;
  }

  /**
   * Checks whether a server for incoming requests is enabled.
   *
   * @return boolean
   */
  public boolean isServerEnabled() {
    return serverEnabled;
  }

  public MessageLengthDecoder getMessageLengthDecoder() {
    return messageLengthDecoder;
  }

  /**
   * Sets whether a server for incoming requests should be created when the transport is set into
   * listen state. Setting this value has no effect until the {@link #listen()} method is called (if
   * the transport is already listening, {@link #close()} has to be called before).
   *
   * @param serverEnabled if <code>true</code>, the transport will listen for incoming requests
   *     after {@link #listen()} has been called.
   */
  public void setServerEnabled(boolean serverEnabled) {
    this.serverEnabled = serverEnabled;
  }

  /**
   * Sets the message length decoder. Default message length decoder is the {@link
   * SnmpMesssageLengthDecoder}. The message length decoder must be able to decode the total length
   * of a message for this transport mapping protocol(s).
   *
   * @param messageLengthDecoder a <code>MessageLengthDecoder</code> instance.
   */
  public void setMessageLengthDecoder(MessageLengthDecoder messageLengthDecoder) {
    if (messageLengthDecoder == null) {
      throw new NullPointerException();
    }
    this.messageLengthDecoder = messageLengthDecoder;
  }

  /**
   * Gets the inbound buffer size for incoming requests. When SNMP packets are received that are
   * longer than this maximum size, the messages will be silently dropped and the connection will be
   * closed.
   *
   * @return the maximum inbound buffer size in bytes.
   */
  public int getMaxInboundMessageSize() {
    return super.getMaxInboundMessageSize();
  }

  /**
   * Sets the maximum buffer size for incoming requests. When SNMP packets are received that are
   * longer than this maximum size, the messages will be silently dropped and the connection will be
   * closed.
   *
   * @param maxInboundMessageSize the length of the inbound buffer in bytes.
   */
  public void setMaxInboundMessageSize(int maxInboundMessageSize) {
    this.maxInboundMessageSize = maxInboundMessageSize;
  }

  private synchronized void timeoutSocket(SocketEntry entry) {
    if (connectionTimeout > 0) {
      socketCleaner.schedule(new SocketTimeout(entry), connectionTimeout);
    }
  }

  public boolean isListening() {
    return (server != null);
  }

  class SocketEntry {
    private Socket socket;
    private TcpAddress peerAddress;
    private long lastUse;
    private LinkedList message = new LinkedList();
    private ByteBuffer readBuffer = null;

    public SocketEntry(TcpAddress address, Socket socket) {
      this.peerAddress = address;
      this.socket = socket;
      this.lastUse = System.currentTimeMillis();
    }

    public long getLastUse() {
      return lastUse;
    }

    public void used() {
      lastUse = System.currentTimeMillis();
    }

    public Socket getSocket() {
      return socket;
    }

    public TcpAddress getPeerAddress() {
      return peerAddress;
    }

    public synchronized void addMessage(byte[] message) {
      this.message.add(message);
    }

    public byte[] nextMessage() {
      if (this.message.size() > 0) {
        return (byte[]) this.message.removeFirst();
      }
      return null;
    }

    public void setReadBuffer(ByteBuffer byteBuffer) {
      this.readBuffer = byteBuffer;
    }

    public ByteBuffer getReadBuffer() {
      return readBuffer;
    }

    public String toString() {
      return "SocketEntry[peerAddress="
          + peerAddress
          + ",socket="
          + socket
          + ",lastUse="
          + new Date(lastUse)
          + "]";
    }
  }

  public static class SnmpMesssageLengthDecoder implements MessageLengthDecoder {
    public int getMinHeaderLength() {
      return MIN_SNMP_HEADER_LENGTH;
    }

    public MessageLength getMessageLength(ByteBuffer buf) throws IOException {
      MutableByte type = new MutableByte();
      BERInputStream is = new BERInputStream(buf);
      int ml = BER.decodeHeader(is, type);
      int hl = (int) is.getPosition();
      MessageLength messageLength = new MessageLength(hl, ml);
      return messageLength;
    }
  }

  class SocketTimeout extends TimerTask {
    private SocketEntry entry;

    public SocketTimeout(SocketEntry entry) {
      this.entry = entry;
    }

    /** run */
    public void run() {
      long now = System.currentTimeMillis();
      if ((socketCleaner == null) || (now - entry.getLastUse() >= connectionTimeout)) {
        if (logger.isDebugEnabled()) {
          logger.debug(
              "Socket has not been used for "
                  + (now - entry.getLastUse())
                  + " micro seconds, closing it");
        }
        sockets.remove(entry.getPeerAddress());
        try {
          synchronized (entry) {
            entry.getSocket().close();
          }
          logger.info("Socket to " + entry.getPeerAddress() + " closed due to timeout");
        } catch (IOException ex) {
          logger.error(ex);
        }
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug("Scheduling " + ((entry.getLastUse() + connectionTimeout) - now));
        }
        socketCleaner.schedule(
            new SocketTimeout(entry), (entry.getLastUse() + connectionTimeout) - now);
      }
    }
  }

  class ServerThread extends Thread {
    private byte[] buf;
    private volatile boolean stop = false;
    private Throwable lastError = null;
    private ServerSocketChannel ssc;
    private Selector selector;

    private LinkedList pending = new LinkedList();

    public ServerThread() throws IOException {
      setName("DefaultTCPTransportMapping_" + getAddress());
      buf = new byte[getMaxInboundMessageSize()];
      // Selector for incoming requests
      selector = Selector.open();

      if (serverEnabled) {
        // Create a new server socket and set to non blocking mode
        ssc = ServerSocketChannel.open();
        ssc.configureBlocking(false);

        // Bind the server socket
        InetSocketAddress isa =
            new InetSocketAddress(tcpAddress.getInetAddress(), tcpAddress.getPort());
        ssc.socket().bind(isa);
        // Register accepts on the server socket with the selector. This
        // step tells the selector that the socket wants to be put on the
        // ready list when accept operations occur, so allowing multiplexed
        // non-blocking I/O to take place.
        ssc.register(selector, SelectionKey.OP_ACCEPT);
      }
    }

    private void processPending() {
      synchronized (pending) {
        while (pending.size() > 0) {
          SocketEntry entry = (SocketEntry) pending.removeFirst();
          try {
            // Register the channel with the selector, indicating
            // interest in connection completion and attaching the
            // target object so that we can get the target back
            // after the key is added to the selector's
            // selected-key set
            if (entry.getSocket().isConnected()) {
              entry.getSocket().getChannel().register(selector, SelectionKey.OP_WRITE, entry);
            } else {
              entry.getSocket().getChannel().register(selector, SelectionKey.OP_CONNECT, entry);
            }

          } catch (IOException iox) {
            logger.error(iox);
            // Something went wrong, so close the channel and
            // record the failure
            try {
              entry.getSocket().getChannel().close();
              TransportStateEvent e =
                  new TransportStateEvent(
                      DefaultTcpTransportMapping.this,
                      entry.getPeerAddress(),
                      TransportStateEvent.STATE_CLOSED,
                      iox);
              fireConnectionStateChanged(e);
            } catch (IOException ex) {
              logger.error(ex);
            }
            lastError = iox;
          }
        }
      }
    }

    public Throwable getLastError() {
      return lastError;
    }

    public void sendMessage(Address address, byte[] message) throws java.io.IOException {
      Socket s = null;
      SocketEntry entry = (SocketEntry) sockets.get(address);
      if (logger.isDebugEnabled()) {
        logger.debug("Looking up connection for destination '" + address + "' returned: " + entry);
        logger.debug(sockets.toString());
      }
      if (entry != null) {
        s = entry.getSocket();
      }
      if ((s == null) || (s.isClosed())) {
        if (logger.isDebugEnabled()) {
          logger.debug("Socket for address '" + address + "' is closed, opening it...");
        }
        SocketChannel sc = null;
        try {
          // Open the channel, set it to non-blocking, initiate connect
          sc = SocketChannel.open();
          sc.configureBlocking(false);
          sc.connect(
              new InetSocketAddress(
                  ((TcpAddress) address).getInetAddress(), ((TcpAddress) address).getPort()));
          s = sc.socket();
          entry = new SocketEntry((TcpAddress) address, s);
          entry.addMessage(message);
          sockets.put(address, entry);

          synchronized (pending) {
            pending.add(entry);
          }

          selector.wakeup();
          logger.debug("Trying to connect to " + address);
        } catch (IOException iox) {
          logger.error(iox);
          throw iox;
        }
      } else {
        entry.addMessage(message);
        synchronized (pending) {
          pending.add(entry);
        }
        selector.wakeup();
      }
    }

    public void run() {
      // Here's where everything happens. The select method will
      // return when any operations registered above have occurred, the
      // thread has been interrupted, etc.
      try {
        while (!stop) {
          try {
            if (selector.select() > 0) {
              if (stop) {
                break;
              }
              // Someone is ready for I/O, get the ready keys
              Set readyKeys = selector.selectedKeys();
              Iterator it = readyKeys.iterator();

              // Walk through the ready-keys collection and process the pending I/O requests.
              while (it.hasNext()) {
                SelectionKey sk = (SelectionKey) it.next();
                it.remove();
                SocketChannel readChannel = null;
                TcpAddress incomingAddress = null;
                if (sk.isAcceptable()) {
                  // The key indexes into the selector so you
                  // can retrieve the socket that's ready for I/O
                  ServerSocketChannel nextReady = (ServerSocketChannel) sk.channel();
                  // Accept the incoming connection
                  Socket s = nextReady.accept().socket();
                  readChannel = s.getChannel();
                  readChannel.configureBlocking(false);
                  readChannel.register(selector, SelectionKey.OP_READ);

                  incomingAddress = new TcpAddress(s.getInetAddress(), s.getPort());
                  SocketEntry entry = new SocketEntry(incomingAddress, s);
                  sockets.put(incomingAddress, entry);
                  timeoutSocket(entry);
                  TransportStateEvent e =
                      new TransportStateEvent(
                          DefaultTcpTransportMapping.this,
                          incomingAddress,
                          TransportStateEvent.STATE_CONNECTED,
                          null);
                  fireConnectionStateChanged(e);
                } else if (sk.isReadable()) {
                  readChannel = (SocketChannel) sk.channel();
                  incomingAddress =
                      new TcpAddress(
                          readChannel.socket().getInetAddress(), readChannel.socket().getPort());
                } else if (sk.isWritable()) {
                  try {
                    SocketEntry entry = (SocketEntry) sk.attachment();
                    SocketChannel sc = (SocketChannel) sk.channel();
                    if (entry != null) {
                      writeMessage(entry, sc);
                    }
                  } catch (IOException iox) {
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                            iox);
                    fireConnectionStateChanged(e);
                    sk.cancel();
                  }
                } else if (sk.isConnectable()) {
                  try {
                    SocketEntry entry = (SocketEntry) sk.attachment();
                    SocketChannel sc = (SocketChannel) sk.channel();
                    if ((!sc.isConnected()) && (sc.finishConnect())) {
                      sc.configureBlocking(false);
                      logger.debug("Connected to " + entry.getPeerAddress());
                      // make sure the connection is closed if not used within the
                      // timeout (milliseconds)
                      timeoutSocket(entry);
                      sc.register(selector, SelectionKey.OP_WRITE, entry);
                    }
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_CONNECTED,
                            null);
                    fireConnectionStateChanged(e);
                  } catch (IOException iox) {
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    sk.cancel();
                  }
                }

                if (readChannel != null) {
                  try {
                    readMessage(sk, readChannel, incomingAddress);
                  } catch (IOException iox) {
                    // IO exception -> channel closed remotely
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    sk.cancel();
                    readChannel.close();
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                            iox);
                    fireConnectionStateChanged(e);
                  }
                }
              }
            }
          } catch (NullPointerException npex) {
            // A NullPointerException has occasionally been observed inside select()
            npex.printStackTrace();
            logger.warn("NullPointerException within select()?");
          }
          processPending();
        }
        if (ssc != null) {
          ssc.close();
        }
      } catch (IOException iox) {
        logger.error(iox);
        lastError = iox;
      }
      if (!stop) {
        stop = true;
        synchronized (DefaultTcpTransportMapping.this) {
          server = null;
        }
      }
    }

    private void readMessage(SelectionKey sk, SocketChannel readChannel, TcpAddress incomingAddress)
        throws IOException {
      // note that socket has been used
      SocketEntry entry = (SocketEntry) sockets.get(incomingAddress);
      if (entry != null) {
        entry.used();
        ByteBuffer readBuffer = entry.getReadBuffer();
        if (readBuffer != null) {
          readChannel.read(readBuffer);
          if (readBuffer.hasRemaining()) {
            readChannel.register(selector, SelectionKey.OP_READ, entry);
          } else {
            dispatchMessage(incomingAddress, readBuffer, readBuffer.capacity());
          }
          return;
        }
      }
      ByteBuffer byteBuffer = ByteBuffer.wrap(buf);
      byteBuffer.limit(messageLengthDecoder.getMinHeaderLength());
      long bytesRead = readChannel.read(byteBuffer);
      if (logger.isDebugEnabled()) {
        logger.debug("Reading header " + bytesRead + " bytes from " + incomingAddress);
      }
      MessageLength messageLength = new MessageLength(0, Integer.MIN_VALUE);
      if (bytesRead == messageLengthDecoder.getMinHeaderLength()) {
        messageLength = messageLengthDecoder.getMessageLength(ByteBuffer.wrap(buf));
        if (logger.isDebugEnabled()) {
          logger.debug("Message length is " + messageLength);
        }
        if ((messageLength.getMessageLength() > getMaxInboundMessageSize())
            || (messageLength.getMessageLength() <= 0)) {
          logger.error(
              "Received message length "
                  + messageLength
                  + " is out of range (maximum inbound message size is "
                  + getMaxInboundMessageSize()
                  + ")");
          if (entry != null) {
            synchronized (entry) {
              entry.getSocket().close();
              logger.info("Socket to " + entry.getPeerAddress() + " closed due to an error");
            }
          }
        } else {
          byteBuffer.limit(messageLength.getMessageLength());
          bytesRead += readChannel.read(byteBuffer);
          if (bytesRead == messageLength.getMessageLength()) {
            dispatchMessage(incomingAddress, byteBuffer, bytesRead);
          } else {
            byte[] message = new byte[byteBuffer.limit()];
            byteBuffer.flip();
            byteBuffer.get(message, 0, byteBuffer.limit() - byteBuffer.remaining());
            entry.setReadBuffer(ByteBuffer.wrap(message));
          }
          readChannel.register(selector, SelectionKey.OP_READ, entry);
        }
      } else if (bytesRead < 0) {
        logger.debug("Socket closed remotely");
        sk.cancel();
        readChannel.close();
        TransportStateEvent e =
            new TransportStateEvent(
                DefaultTcpTransportMapping.this,
                incomingAddress,
                TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                null);
        fireConnectionStateChanged(e);
      }
    }

    private void dispatchMessage(
        TcpAddress incomingAddress, ByteBuffer byteBuffer, long bytesRead) {
      byteBuffer.flip();
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Received message from "
                + incomingAddress
                + " with length "
                + bytesRead
                + ": "
                + new OctetString(byteBuffer.array(), 0, (int) bytesRead).toHexString());
      }
      ByteBuffer bis;
      if (isAsyncMsgProcessingSupported()) {
        byte[] bytes = new byte[(int) bytesRead];
        System.arraycopy(byteBuffer.array(), 0, bytes, 0, (int) bytesRead);
        bis = ByteBuffer.wrap(bytes);
      } else {
        bis = ByteBuffer.wrap(byteBuffer.array(), 0, (int) bytesRead);
      }
      fireProcessMessage(incomingAddress, bis);
    }

    private void writeMessage(SocketEntry entry, SocketChannel sc) throws IOException {
      byte[] message = entry.nextMessage();
      if (message != null) {
        ByteBuffer buffer = ByteBuffer.wrap(message);
        sc.write(buffer);
        if (logger.isDebugEnabled()) {
          logger.debug(
              "Send message with length "
                  + message.length
                  + " to "
                  + entry.getPeerAddress()
                  + ": "
                  + new OctetString(message).toHexString());
        }
        sc.register(selector, SelectionKey.OP_READ);
      }
    }

    public void close() {
      stop = true;
      ServerThread st = server;
      if (st != null) {
        st.interrupt();
      }
    }
  }
}
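The class comment notes that the internal server thread starts when listen() is called, or lazily on the first sendMessage(). A small usage sketch under that assumption; the bind address, peer address, and payload below are illustrative placeholders, not values from the original code.

// Usage sketch only: bind the transport, send one message, then shut down.
public class TcpTransportExample {
  public static void main(String[] args) throws IOException {
    DefaultTcpTransportMapping transport =
        new DefaultTcpTransportMapping(new TcpAddress(InetAddress.getByName("0.0.0.0"), 4700));
    transport.setConnectionTimeout(30000); // close idle peer connections after 30 s
    transport.listen();                    // starts the internal ServerThread (daemon)

    TcpAddress peer = new TcpAddress(InetAddress.getByName("127.0.0.1"), 4700);
    byte[] message = new byte[0];          // placeholder for a BER-encoded SNMP message
    transport.sendMessage(peer, message);

    transport.close();                     // stops the server thread and closes all sockets
  }
}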
/** @author Javier Paniza */
public class MetaFinder implements Serializable {

  private static Log log = LogFactory.getLog(MetaFinder.class);

  private static Map argumentsJBoss11ToEJBQL;
  private static Map argumentsToHQL;
  private static Map tokensToChangeDollarsAndNL;

  private String name;
  private String arguments;
  private boolean collection;
  private String condition;
  private String order;
  private MetaModel metaModel;

  public String getArguments() {
    arguments = Strings.change(arguments, "String", "java.lang.String");
    arguments = Strings.change(arguments, "java.lang.java.lang.String", "java.lang.String");
    return arguments;
  }

  public Collection getMetaPropertiesArguments() throws XavaException {
    StringTokenizer st = new StringTokenizer(getArguments(), ",");
    Collection result = new ArrayList();
    while (st.hasMoreTokens()) {
      String argument = st.nextToken();
      StringTokenizer argumentSt = new StringTokenizer(argument);
      String type = argumentSt.nextToken().trim();
      String name = argumentSt.nextToken().trim();
      MetaProperty p = new MetaProperty();
      p.setName(name);
      p.setTypeName(type);
      result.add(p);
    }
    return result;
  }

  public boolean isCollection() {
    return collection;
  }

  public String getCondition() {
    return condition;
  }

  public String getName() {
    return name;
  }

  public void setArguments(String arguments) {
    this.arguments = arguments;
  }

  public void setCollection(boolean collection) {
    this.collection = collection;
  }

  public void setCondition(String condition) {
    this.condition = condition;
  }

  public void setName(String name) {
    this.name = name;
  }

  public boolean isSupportedForEJB2() throws XavaException {
    return !hasSome3LevelProperty(getCondition()) && !hasSome3LevelProperty(getOrder());
  }

  private boolean hasSome3LevelProperty(String sentence) throws XavaException {
    if (sentence == null) return false;
    int i = sentence.indexOf("${");
    int f = 0;
    while (i >= 0) {
      f = sentence.indexOf("}", i + 2);
      if (f < 0) break;
      String property = sentence.substring(i + 2, f);
      StringTokenizer st = new StringTokenizer(property, ".");
      if (st.countTokens() > 3) {
        log.warn(XavaResources.getString("property_3_level_in_ejb2_finder", property, getName()));
        return true;
      }
      if (st.countTokens() == 3) {
        if (!getMetaModel().getMetaProperty(property).isKey()) {
          log.warn(XavaResources.getString("property_3_level_in_ejb2_finder", property, getName()));
          return true;
        }
      }
      i = sentence.indexOf("${", i + 1);
    }
    return false;
  }

  public String getEJBQLCondition() throws XavaException {
    StringBuffer sb = new StringBuffer("SELECT OBJECT(o) FROM ");
    sb.append(getMetaModel().getName());
    sb.append(" o");
    if (!Is.emptyString(this.condition)) {
      sb.append(" WHERE ");
      String attributesCondition =
          getMetaModel().getMapping().changePropertiesByCMPAttributes(this.condition);
      sb.append(Strings.change(attributesCondition, getArgumentsJBoss11ToEJBQL()));
    }
    if (!Is.emptyString(this.order)) {
      sb.append(" ORDER BY ");
      sb.append(getMetaModel().getMapping().changePropertiesByCMPAttributes(this.order));
    }
    return sb.toString();
  }

  public String getHQLCondition() throws XavaException {
    return getHQLCondition(true);
  }

  private String getHQLCondition(boolean order) throws XavaException {
    StringBuffer sb = new StringBuffer("from ");
    sb.append(getMetaModel().getName());
    sb.append(" as o");
    if (!Is.emptyString(this.condition)) {
      sb.append(" where ");
      String condition = transformAggregateProperties(getCondition());
      condition = Strings.change(condition, getArgumentsToHQL());
      sb.append(Strings.change(condition, getTokensToChangeDollarsAndNL()));
    }
    if (order && !Is.emptyString(this.order)) {
      sb.append(" order by ");
      sb.append(
          Strings.change(
              transformAggregateProperties(this.order), getTokensToChangeDollarsAndNL()));
    }
    return sb.toString();
  }

  /**
   * Transforms ${address.street} into ${address_street} if address is an aggregate of the
   * container model.
   *
   * @param condition the condition that may contain ${...} qualified properties
   * @return the condition with aggregate references joined by '_' instead of '.'
   */
  private String transformAggregateProperties(String condition) {
    int i = condition.indexOf("${");
    if (i < 0) return condition;
    StringBuffer result = new StringBuffer(condition.substring(0, i + 2));
    while (i >= 0) {
      int f = condition.indexOf("}", i);
      String property = condition.substring(i + 2, f);
      String transformedProperty = transformAgregateProperty(property);
      result.append(transformedProperty);
      i = condition.indexOf("${", f);
      if (i >= 0) result.append(condition.substring(f, i));
      else result.append(condition.substring(f));
    }
    return result.toString();
  }

  private String transformAgregateProperty(String property) {
    StringBuffer result = new StringBuffer();
    StringTokenizer st = new StringTokenizer(property, ".");
    String member = "";
    while (st.hasMoreTokens()) {
      String token = st.nextToken();
      result.append(token);
      if (!st.hasMoreTokens()) break;
      member = member + token;
      try {
        MetaReference ref = getMetaModel().getMetaReference(member);
        if (ref.isAggregate()) result.append('_');
        else result.append('.');
      } catch (XavaException ex) {
        result.append('.');
      }
      member = member + ".";
    }
    return result.toString();
  }

  public String getHQLCountSentence() throws XavaException {
    StringBuffer sb = new StringBuffer("select count(*) ");
    sb.append(getHQLCondition(false));
    return sb.toString();
  }

  public MetaModel getMetaModel() {
    return metaModel;
  }

  public void setMetaModel(MetaModel metaModel) {
    this.metaModel = metaModel;
  }

  public String getOrder() {
    return order;
  }

  public void setOrder(String order) {
    this.order = order;
  }

  private static Map getArgumentsJBoss11ToEJBQL() {
    if (argumentsJBoss11ToEJBQL == null) {
      argumentsJBoss11ToEJBQL = new HashMap();
      for (int i = 0; i < 30; i++) {
        argumentsJBoss11ToEJBQL.put("{" + i + "}", "?" + (i + 1));
      }
    }
    return argumentsJBoss11ToEJBQL;
  }

  private static Map getArgumentsToHQL() {
    if (argumentsToHQL == null) {
      argumentsToHQL = new HashMap();
      for (int i = 0; i < 30; i++) {
        argumentsToHQL.put("{" + i + "}", ":arg" + i);
      }
    }
    return argumentsToHQL;
  }

  static Map getTokensToChangeDollarsAndNL() {
    if (tokensToChangeDollarsAndNL == null) {
      tokensToChangeDollarsAndNL = new HashMap();
      tokensToChangeDollarsAndNL.put("${", "o.");
      tokensToChangeDollarsAndNL.put("}", "");
      tokensToChangeDollarsAndNL.put("\n", "");
    }
    return tokensToChangeDollarsAndNL;
  }

  public boolean equals(Object other) {
    if (!(other instanceof MetaFinder)) return false;
    return toString().equals(other.toString());
  }

  public int hashCode() {
    return toString().hashCode();
  }

  public String toString() {
    return "Finder: " + getMetaModel().getName() + "." + getName();
  }
}
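To make the substitutions above concrete, here is a small stand-alone sketch that mimics what getHQLCondition() does with the two token maps, using plain String.replace instead of the project's Strings.change helper; the Invoice model and property names are invented for illustration.

// Illustrative only: reproduces the {n} -> :argn and ${...} -> o.... rewrites
// applied by getHQLCondition(), without the OpenXava helpers.
public class FinderConditionDemo {
  public static void main(String[] args) {
    String condition = "${year} = {0} and ${customer.name} like {1}";

    // Arguments first, as getHQLCondition() applies getArgumentsToHQL() before the token map.
    String hql = condition;
    for (int i = 0; i < 30; i++) {
      hql = hql.replace("{" + i + "}", ":arg" + i);
    }
    // Then ${...} -> o.... and newline removal, as in getTokensToChangeDollarsAndNL().
    hql = hql.replace("${", "o.").replace("}", "").replace("\n", "");

    // Prints: from Invoice as o where o.year = :arg0 and o.customer.name like :arg1
    System.out.println("from Invoice as o where " + hql);
  }
}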
Example #5
/**
 * Distribute application-specific large, read-only files efficiently.
 *
 * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce framework to cache
 * files (text, archives, jars etc.) needed by applications.
 *
 * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached via the {@link
 * org.apache.hadoop.mapred.JobConf}. The <code>DistributedCache</code> assumes that the files
 * specified via hdfs:// urls are already present on the {@link FileSystem} at the path specified by
 * the url.
 *
 * <p>The framework will copy the necessary files on to the slave node before any tasks for the job
 * are executed on that node. Its efficiency stems from the fact that the files are only copied once
 * per job and the ability to cache archives which are un-archived on the slaves.
 *
 * <p><code>DistributedCache</code> can be used to distribute simple, read-only data/text files
 * and/or more complex types such as archives, jars etc. Archives (zip, tar and tgz/tar.gz files)
 * are un-archived at the slave nodes. Jars may be optionally added to the classpath of the tasks, a
 * rudimentary software distribution mechanism. Files have execution permissions. Optionally users
 * can also direct it to symlink the distributed cache file(s) into the working directory of the
 * task.
 *
 * <p><code>DistributedCache</code> tracks modification timestamps of the cache files. Clearly the
 * cache files should not be modified by the application or externally while the job is executing.
 *
 * <p>Here is an illustrative example on how to use the <code>DistributedCache</code>:
 *
 * <p>
 *
 * <blockquote>
 *
 * <pre>
 *     // Setting up the cache for the application
 *
 *     1. Copy the requisite files to the <code>FileSystem</code>:
 *
 *     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
 *     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
 *     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
 *     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
 *     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
 *     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
 *
 *     2. Setup the application's <code>JobConf</code>:
 *
 *     JobConf job = new JobConf();
 *     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
 *                                   job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
 *     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
 *
 *     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
 *     or {@link org.apache.hadoop.mapred.Reducer}:
 *
 *     public static class MapClass extends MapReduceBase
 *     implements Mapper&lt;K, V, K, V&gt; {
 *
 *       private Path[] localArchives;
 *       private Path[] localFiles;
 *
 *       public void configure(JobConf job) {
 *         // Get the cached archives/files
 *         localArchives = DistributedCache.getLocalCacheArchives(job);
 *         localFiles = DistributedCache.getLocalCacheFiles(job);
 *       }
 *
 *       public void map(K key, V value,
 *                       OutputCollector&lt;K, V&gt; output, Reporter reporter)
 *       throws IOException {
 *         // Use data from the cached archives/files here
 *         // ...
 *         // ...
 *         output.collect(k, v);
 *       }
 *     }
 *
 * </pre>
 *
 * </blockquote>
 *
 * @see org.apache.hadoop.mapred.JobConf
 * @see org.apache.hadoop.mapred.JobClient
 */
public class DistributedCache {
  // cacheID to cacheStatus mapping
  private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>();

  private static TreeMap<Path, Long> baseDirSize = new TreeMap<Path, Long>();
  private static TreeMap<Path, Integer> baseDirNumberSubDir = new TreeMap<Path, Integer>();

  // default total cache size
  private static final long DEFAULT_CACHE_SIZE = 10737418240L;
  private static final long DEFAULT_CACHE_SUBDIR_LIMIT = 10000;

  private static final Log LOG = LogFactory.getLog(DistributedCache.class);
  private static Random random = new Random();

  /**
   * Get the locally cached file or archive; it could either be previously cached (and valid) or
   * copy it from the {@link FileSystem} now.
   *
   * @param cache the cache to be localized, this should be specified as new
   *     URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme or hostname:port is
   *     provided, the file is assumed to be in the filesystem being used in the Configuration
   * @param conf The Configuration which contains the filesystem
   * @param baseDir The base cache directory where you want to localize the files/archives
   * @param fileStatus The file status on the dfs.
   * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or
   *     .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred
   *     automatically and the directory where the archive is unzipped/unjarred/untarred is returned
   *     as the Path. In case of a file, the path to the file is returned
   * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be
   *     cached hasn't changed since the job started
   * @param currentWorkDir this is the directory where you would want to create symlinks for the
   *     locally cached files/archives
   * @return the path to directory where the archives are unjarred in case of archives, the path to
   *     the file where the file is copied locally
   * @throws IOException
   */
  public static Path getLocalCache(
      URI cache,
      Configuration conf,
      Path baseDir,
      FileStatus fileStatus,
      boolean isArchive,
      long confFileStamp,
      Path currentWorkDir,
      MRAsyncDiskService asyncDiskService)
      throws IOException {
    return getLocalCache(
        cache,
        conf,
        baseDir,
        fileStatus,
        isArchive,
        confFileStamp,
        fileStatus.getLen(),
        currentWorkDir,
        true,
        asyncDiskService,
        new LocalDirAllocator("mapred.local.dir"));
  }

  public static Path getLocalCacheFromTimestamps(
      URI cache,
      Configuration conf,
      Path subDir,
      FileStatus fileStatus,
      boolean isArchive,
      long confFileStamp,
      long fileLength,
      Path currentWorkDir,
      boolean honorSymLinkConf,
      MRAsyncDiskService asyncDiskService,
      LocalDirAllocator lDirAllocator)
      throws IOException {
    return getLocalCache(
        cache,
        conf,
        subDir,
        fileStatus,
        isArchive,
        confFileStamp,
        fileLength,
        currentWorkDir,
        honorSymLinkConf,
        asyncDiskService,
        lDirAllocator);
  }

  public static Path getLocalCacheFromURI(
      URI cache,
      Configuration conf,
      Path subDir,
      boolean isArchive,
      long fileLength,
      Path currentWorkDir,
      boolean honorSymLinkConf,
      MRAsyncDiskService asyncDiskService,
      LocalDirAllocator lDirAllocator)
      throws IOException {
    return getLocalCache(
        cache,
        conf,
        subDir,
        null,
        isArchive,
        0,
        fileLength,
        currentWorkDir,
        honorSymLinkConf,
        asyncDiskService,
        lDirAllocator);
  }

  /** Added for backward compatibility. */
  public static Path getLocalCache(
      URI cache,
      Configuration conf,
      Path subdir,
      FileStatus fileStatus,
      boolean isArchive,
      long confFileStamp,
      Path currentWorkDir,
      boolean honorSymLinkConf,
      MRAsyncDiskService asyncDiskService,
      LocalDirAllocator lDirAllocator)
      throws IOException {
    return getLocalCache(
        cache,
        conf,
        subdir,
        fileStatus,
        isArchive,
        confFileStamp,
        fileStatus.getLen(),
        currentWorkDir,
        honorSymLinkConf,
        asyncDiskService,
        lDirAllocator);
  }

  /**
   * Get the locally cached file or archive; it could either be previously cached (and valid) or
   * copy it from the {@link FileSystem} now.
   *
   * @param cache the cache to be localized, this should be specified as new
   *     URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme or hostname:port is
   *     provided, the file is assumed to be in the filesystem being used in the Configuration
   * @param conf The Configuration which contains the filesystem
   * @param subDir The cache subdirectory where you want to localize the files/archives
   * @param fileStatus The file status on the dfs.
   * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or
   *     .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred
   *     automatically and the directory where the archive is unzipped/unjarred/untarred is returned
   *     as the Path. In case of a file, the path to the file is returned
   * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be
   *     cached hasn't changed since the job started
   * @param fileLength this is the length of the cache file
   * @param currentWorkDir this is the directory where you would want to create symlinks for the
   *     locally cached files/archives
   * @param honorSymLinkConf if this is false, then the symlinks are not created even if conf says
   *     so (this is required for an optimization in task launches)
   * @param lDirAllocator LocalDirAllocator of the tracker
   * @return the path to directory where the archives are unjarred in case of archives, the path to
   *     the file where the file is copied locally
   * @throws IOException
   */
  private static Path getLocalCache(
      URI cache,
      Configuration conf,
      Path subDir,
      FileStatus fileStatus,
      boolean isArchive,
      long confFileStamp,
      long fileLength,
      Path currentWorkDir,
      boolean honorSymLinkConf,
      MRAsyncDiskService asyncDiskService,
      LocalDirAllocator lDirAllocator)
      throws IOException {
    String key = getKey(cache, conf, confFileStamp);

    CacheStatus lcacheStatus;
    Path localizedPath;
    synchronized (cachedArchives) {
      lcacheStatus = cachedArchives.get(key);
      if (lcacheStatus == null) {
        // was never localized
        Path uniqueParentDir = new Path(subDir, String.valueOf(random.nextLong()));
        String cachePath = new Path(uniqueParentDir, makeRelative(cache, conf)).toString();
        Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, fileLength, conf);
        lcacheStatus =
            new CacheStatus(
                new Path(localPath.toString().replace(cachePath, "")), localPath, uniqueParentDir);
        cachedArchives.put(key, lcacheStatus);
      }
      lcacheStatus.refcount++;
    }
    boolean initSuccessful = false;
    try {
      synchronized (lcacheStatus) {
        if (!lcacheStatus.isInited()) {
          localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, isArchive);
          lcacheStatus.initComplete();
        } else {
          if (fileStatus != null) {
            localizedPath =
                checkCacheStatusValidity(
                    conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive);
          } else {
            // if fileStatus is null, then the md5 must be correct
            // so there is no need to check for cache validity
            localizedPath = lcacheStatus.localizedLoadPath;
          }
        }
        createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, honorSymLinkConf);
      }

      // try deleting stuff if you can
      long size = 0;
      int numberSubDir = 0;
      synchronized (lcacheStatus) {
        synchronized (baseDirSize) {
          Long get = baseDirSize.get(lcacheStatus.getBaseDir());
          if (get != null) {
            size = get.longValue();
          } else {
            LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir());
          }
        }
        synchronized (baseDirNumberSubDir) {
          Integer get = baseDirNumberSubDir.get(lcacheStatus.getBaseDir());
          if (get != null) {
            numberSubDir = get.intValue();
          } else {
            LOG.warn("Cannot find subdirectories limit of baseDir: " + lcacheStatus.getBaseDir());
          }
        }
      }
      // setting the cache size to a default of 10GB
      long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE);
      long allowedNumberSubDir =
          conf.getLong("local.cache.numbersubdir", DEFAULT_CACHE_SUBDIR_LIMIT);
      if (allowedSize < size || allowedNumberSubDir < numberSubDir) {
        // try some cache deletions
        LOG.debug(
            "Start deleting released cache because"
                + " [size, allowedSize, numberSubDir, allowedNumberSubDir] ="
                + " ["
                + size
                + ", "
                + allowedSize
                + ", "
                + numberSubDir
                + ", "
                + allowedNumberSubDir
                + "]");
        deleteCache(conf, asyncDiskService);
      }
      initSuccessful = true;
      return localizedPath;
    } finally {
      if (!initSuccessful) {
        synchronized (cachedArchives) {
          lcacheStatus.refcount--;
        }
      }
    }
  }

  /**
   * Get the locally cached file or archive; it will either have been previously cached (and still
   * be valid) or it will be copied from the {@link FileSystem} now.
   *
   * @param cache the cache to be localized, this should be specified as new
   *     URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme or hostname:port is
   *     provided the file is assumed to be in the filesystem being used in the Configuration
   * @param conf The Configuration which contains the filesystem
   * @param baseDir The base cache directory where you want to localize the files/archives
   * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or
   *     .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred
   *     automatically and the directory where the archive is unzipped/unjarred/untarred is returned
   *     as the Path. In case of a file, the path to the file is returned
   * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be
   *     cached hasn't changed since the job started
   * @param currentWorkDir this is the directory where you would want to create symlinks for the
   *     locally cached files/archives
   * @return the path to directory where the archives are unjarred in case of archives, the path to
   *     the file where the file is copied locally
   * @throws IOException
   */
  public static Path getLocalCache(
      URI cache,
      Configuration conf,
      Path baseDir,
      boolean isArchive,
      long confFileStamp,
      Path currentWorkDir,
      MRAsyncDiskService asyncDiskService)
      throws IOException {
    return getLocalCache(
        cache, conf, baseDir, null, isArchive, confFileStamp, currentWorkDir, asyncDiskService);
  }

  /**
   * This is the opposite of getLocalCache. When you are done using the cache, you need to release
   * it (a usage sketch follows this method).
   *
   * @param cache The cache URI to be released
   * @param conf configuration which contains the filesystem the cache is contained in.
   * @throws IOException
   */
  public static void releaseCache(URI cache, Configuration conf, long timeStamp)
      throws IOException {
    String cacheId = getKey(cache, conf, timeStamp);

    synchronized (cachedArchives) {
      CacheStatus lcacheStatus = cachedArchives.get(cacheId);
      if (lcacheStatus == null) {
        LOG.warn(
            "Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!");
        return;
      }
      lcacheStatus.refcount--;
    }
  }
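
  // Illustrative usage sketch, not invoked anywhere in this class: the expected
  // localize/use/release lifecycle for a single archive. The URI, directories and
  // the null asyncDiskService are hypothetical placeholders.
  private static void exampleLocalizeUseRelease(Configuration conf, Path baseDir, Path workDir)
      throws IOException {
    URI cache = URI.create("hdfs://namenode:8020/libs/dictionary.zip#dict");
    long stamp = getTimestamp(conf, cache); // current mtime on HDFS, used as the freshness stamp
    Path local = getLocalCache(cache, conf, baseDir, true, stamp, workDir, null);
    try {
      LOG.info("Localized " + cache + " under " + local);
      // ... read the unpacked archive under 'local' here ...
    } finally {
      // every successful getLocalCache call must be paired with a releaseCache call
      releaseCache(cache, conf, stamp);
    }
  }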

  /** Runnable which removes the cache directories from the disk */
  private static class CacheFileCleanTask implements Runnable {
    private MRAsyncDiskService asyncDiskService;
    private LocalFileSystem fs;
    private List<CacheStatus> toBeDeletedCache;

    public CacheFileCleanTask(
        MRAsyncDiskService asyncDiskService,
        LocalFileSystem fs,
        List<CacheStatus> toBeDeletedCache) {
      this.asyncDiskService = asyncDiskService;
      this.fs = fs;
      this.toBeDeletedCache = toBeDeletedCache;
    }

    @Override
    public void run() {
      for (CacheStatus lcacheStatus : toBeDeletedCache) {
        synchronized (lcacheStatus) {
          Path fullUniqueParentDir =
              new Path(lcacheStatus.localizedBaseDir, lcacheStatus.uniqueParentDir);
          try {
            LOG.info("Deleting local cached path: " + fullUniqueParentDir.toString());
            deleteLocalPath(asyncDiskService, fs, fullUniqueParentDir);
            // decrement the size of the cache from baseDirSize
            deleteCacheInfoUpdate(lcacheStatus);
            LOG.info("Removed cache " + lcacheStatus.localizedLoadPath);
          } catch (IOException e) {
            LOG.warn("Error when deleting " + fullUniqueParentDir, e);
          }
        }
      }
    }
  }

  // To delete the caches which have a refcount of zero
  private static void deleteCache(Configuration conf, MRAsyncDiskService asyncDiskService)
      throws IOException {
    List<CacheStatus> deleteSet = new LinkedList<CacheStatus>();
    // try deleting cache Status with refcount of zero
    synchronized (cachedArchives) {
      for (Iterator<String> it = cachedArchives.keySet().iterator(); it.hasNext(); ) {
        String cacheId = it.next();
        CacheStatus lcacheStatus = cachedArchives.get(cacheId);
        if (lcacheStatus.refcount == 0) {
          // delete this cache entry from the global list
          // and mark the localized file for deletion
          deleteSet.add(lcacheStatus);
          it.remove();
        }
      }
    }
    // do the deletion asynchronously, after releasing the global lock
    Thread cacheFileCleaner =
        new Thread(new CacheFileCleanTask(asyncDiskService, FileSystem.getLocal(conf), deleteSet));
    cacheFileCleaner.start();
  }

  /**
   * Delete a local path with asyncDiskService if available, or otherwise synchronously with local
   * file system.
   */
  private static void deleteLocalPath(
      MRAsyncDiskService asyncDiskService, LocalFileSystem fs, Path path) throws IOException {
    boolean deleted = false;
    if (asyncDiskService != null) {
      // Try to delete using asyncDiskService
      String localPathToDelete = path.toUri().getPath();
      deleted = asyncDiskService.moveAndDeleteAbsolutePath(localPathToDelete);
      if (!deleted) {
        LOG.warn(
            "Cannot find DistributedCache path "
                + localPathToDelete
                + " on any of the asyncDiskService volumes!");
      }
    }
    if (!deleted) {
      // If no asyncDiskService, we will delete the files synchronously
      fs.delete(path, true);
    }
    LOG.info("Deleted path " + path);
  }

  /*
   * Returns the relative path that this cache will be localized in. For
   * hdfs://hostname:port/absolute_path the relative path is
   * hostname/absolute_path; if it is just /absolute_path then the relative
   * path is <hostname of the DFS this mapred cluster is running on>/absolute_path.
   */
  public static String makeRelative(URI cache, Configuration conf) throws IOException {
    String host = cache.getHost();
    if (host == null) {
      host = cache.getScheme();
    }
    if (host == null) {
      URI defaultUri = FileSystem.get(conf).getUri();
      host = defaultUri.getHost();
      if (host == null) {
        host = defaultUri.getScheme();
      }
    }
    String path = host + cache.getPath();
    path = path.replace(":/", "/"); // remove windows device colon
    return path;
  }
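
  /*
   * Worked example (hypothetical URI): makeRelative maps
   *   hdfs://namenode:8020/user/joe/cache.zip  ->  "namenode/user/joe/cache.zip"
   * and for a bare /user/joe/cache.zip it prepends the host (or scheme) of the
   * default FileSystem configured for this cluster.
   */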

  static String getKey(URI cache, Configuration conf, long timeStamp) throws IOException {
    return makeRelative(cache, conf) + String.valueOf(timeStamp);
  }

  private static Path checkCacheStatusValidity(
      Configuration conf,
      URI cache,
      long confFileStamp,
      CacheStatus cacheStatus,
      FileStatus fileStatus,
      boolean isArchive)
      throws IOException {
    FileSystem fs = FileSystem.get(cache, conf);
    // the cached copy has to exist and be fresh (unmodified since the job started)
    if (!ifExistsAndFresh(conf, fs, cache, confFileStamp, cacheStatus, fileStatus)) {
      throw new IOException(
          "Stale cache file: " + cacheStatus.localizedLoadPath + " for cache-file: " + cache);
    }
    LOG.info(
        String.format(
            "Using existing cache of %s->%s", cache.toString(), cacheStatus.localizedLoadPath));
    return cacheStatus.localizedLoadPath;
  }

  private static void createSymlink(
      Configuration conf,
      URI cache,
      CacheStatus cacheStatus,
      boolean isArchive,
      Path currentWorkDir,
      boolean honorSymLinkConf)
      throws IOException {
    boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf);
    if (cache.getFragment() == null) {
      doSymlink = false;
    }
    String link = currentWorkDir.toString() + Path.SEPARATOR + cache.getFragment();
    File flink = new File(link);
    if (doSymlink) {
      if (!flink.exists()) {
        FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link);
      }
    }
  }

  // the method which actually copies the caches locally and unjars/unzips them
  // and does chmod for the files
  private static Path localizeCache(
      Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, boolean isArchive)
      throws IOException {
    FileSystem fs = getFileSystem(cache, conf);
    FileSystem localFs = FileSystem.getLocal(conf);
    Path parchive = null;

    if (isArchive) {
      parchive =
          new Path(
              cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName()));
    } else {
      parchive = cacheStatus.localizedLoadPath;
    }
    if (!localFs.mkdirs(parchive.getParent())) {
      throw new IOException(
          "Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString());
    }
    String cacheId = cache.getPath();

    fs.copyToLocalFile(new Path(cacheId), parchive);
    if (isArchive) {
      String tmpArchive = parchive.toString().toLowerCase();
      File srcFile = new File(parchive.toString());
      File destDir = new File(parchive.getParent().toString());
      if (tmpArchive.endsWith(".jar")) {
        RunJar.unJar(srcFile, destDir);
      } else if (tmpArchive.endsWith(".zip")) {
        FileUtil.unZip(srcFile, destDir);
      } else if (isTarFile(tmpArchive)) {
        FileUtil.unTar(srcFile, destDir);
      }
      // otherwise nothing is unpacked and the copied file is left in the directory as-is
    }
    long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString()));
    cacheStatus.size = cacheSize;
    addCacheInfoUpdate(cacheStatus);

    // do chmod here
    try {
      // Setting recursive permission to grant everyone read and execute
      Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir);
      LOG.info("Doing chmod on localdir :" + localDir);
      FileUtil.chmod(localDir.toString(), "ugo+rx", true);
    } catch (InterruptedException e) {
      LOG.warn("Exception in chmod" + e.toString());
    }

    // update cacheStatus to reflect the newly cached file
    cacheStatus.mtime = getTimestamp(conf, cache);
    return cacheStatus.localizedLoadPath;
  }

  private static boolean isTarFile(String filename) {
    return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") || filename.endsWith(".tar"));
  }

  // Checks if the cache has already been localized and is fresh
  private static boolean ifExistsAndFresh(
      Configuration conf,
      FileSystem fs,
      URI cache,
      long confFileStamp,
      CacheStatus lcacheStatus,
      FileStatus fileStatus)
      throws IOException {
    // check for existence of the cache
    long dfsFileStamp;
    if (fileStatus != null) {
      dfsFileStamp = fileStatus.getModificationTime();
    } else {
      dfsFileStamp = getTimestamp(conf, cache);
    }

    // ensure that the file on hdfs hasn't been modified since the job started
    if (dfsFileStamp != confFileStamp) {
      LOG.fatal("File: " + cache + " has changed on HDFS since job started");
      throw new IOException("File: " + cache + " has changed on HDFS since job started");
    }

    if (dfsFileStamp != lcacheStatus.mtime) {
      // needs refreshing
      return false;
    }

    return true;
  }

  /**
   * Returns mtime of a given cache file on hdfs.
   *
   * @param conf configuration
   * @param cache cache file
   * @return mtime of a given cache file on hdfs
   * @throws IOException
   */
  public static long getTimestamp(Configuration conf, URI cache) throws IOException {
    FileSystem fileSystem = FileSystem.get(cache, conf);
    Path filePath = new Path(cache.getPath());

    return fileSystem.getFileStatus(filePath).getModificationTime();
  }

  /**
   * Returns the status of a given cache file on hdfs.
   *
   * @param conf configuration
   * @param cache cache file
   * @return FileStatus object of the file
   * @throws IOException
   */
  public static FileStatus getFileStatus(Configuration conf, URI cache) throws IOException {
    FileSystem fileSystem = FileSystem.get(cache, conf);
    Path filePath = new Path(cache.getPath());

    return fileSystem.getFileStatus(filePath);
  }

  /**
   * This method creates symlinks in another directory for all the files in a given directory
   *
   * @param conf the configuration
   * @param jobCacheDir the directory containing the files to link to
   * @param workDir the directory in which the symlinks are created
   * @throws IOException
   */
  public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir)
      throws IOException {
    if ((jobCacheDir == null || !jobCacheDir.isDirectory())
        || workDir == null
        || (!workDir.isDirectory())) {
      return;
    }
    boolean createSymlink = getSymlink(conf);
    if (createSymlink) {
      File[] list = jobCacheDir.listFiles();
      for (int i = 0; i < list.length; i++) {
        FileUtil.symLink(
            list[i].getAbsolutePath(), new File(workDir, list[i].getName()).toString());
      }
    }
  }
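
  /*
   * Sketch (hypothetical directories): once createSymlink(conf) has switched
   * mapred.create.symlink to "yes", every file in the job cache directory gets a
   * same-named link inside the task working directory:
   *
   *   createSymlink(conf);
   *   createAllSymlink(conf,
   *       new File("/local/jobcache/job_0001"),
   *       new File("/local/attempt_0001_m_000000_0/work"));
   */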

  private static String getFileSysName(URI url) {
    String fsname = url.getScheme();
    if ("hdfs".equals(fsname)) {
      String host = url.getHost();
      int port = url.getPort();
      return (port == (-1)) ? host : (host + ":" + port);
    } else {
      return null;
    }
  }

  private static FileSystem getFileSystem(URI cache, Configuration conf) throws IOException {
    String fileSysName = getFileSysName(cache);
    if (fileSysName != null) return FileSystem.getNamed(fileSysName, conf);
    else return FileSystem.get(conf);
  }

  /**
   * Set the configuration with the given set of archives
   *
   * @param archives The list of archives that need to be localized
   * @param conf Configuration which will be changed
   */
  public static void setCacheArchives(URI[] archives, Configuration conf) {
    String sarchives = StringUtils.uriToString(archives);
    conf.set("mapred.cache.archives", sarchives);
  }

  /**
   * Set the configuration with the given set of files
   *
   * @param files The list of files that need to be localized
   * @param conf Configuration which will be changed
   */
  public static void setCacheFiles(URI[] files, Configuration conf) {
    String sfiles = StringUtils.uriToString(files);
    conf.set("mapred.cache.files", sfiles);
  }
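
  // Illustrative round trip, not used by the framework (the URIs are hypothetical):
  // the setters serialize the URI list into the job configuration and the
  // corresponding getters parse it back from the comma-separated list.
  private static URI[] exampleCacheFileRoundTrip(Configuration conf) throws IOException {
    URI[] files = {
      URI.create("hdfs://namenode:8020/data/lookup.txt#lookup"),
      URI.create("hdfs://namenode:8020/data/stopwords.txt#stopwords")
    };
    setCacheFiles(files, conf); // stored under "mapred.cache.files"
    return getCacheFiles(conf); // parsed back from the configuration
  }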

  /**
   * Get cache archives set in the Configuration
   *
   * @param conf The configuration which contains the archives
   * @return A URI array of the caches set in the Configuration
   * @throws IOException
   */
  public static URI[] getCacheArchives(Configuration conf) throws IOException {
    return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives"));
  }

  /**
   * Get the shared cache archives set in the Configuration
   *
   * @param conf The configuration which contains the shared archives
   * @return A URI array of the shared caches set in the Configuration
   * @throws IOException
   */
  public static URI[] getSharedCacheArchives(Configuration conf) throws IOException {
    return StringUtils.stringToURI(conf.getStrings("mapred.cache.shared.archives"));
  }

  /**
   * Get cache files set in the Configuration
   *
   * @param conf The configuration which contains the files
   * @return A URI array of the files set in the Configuration
   * @throws IOException
   */
  public static URI[] getCacheFiles(Configuration conf) throws IOException {
    return StringUtils.stringToURI(conf.getStrings("mapred.cache.files"));
  }

  /**
   * Get the shared cache files set in the Configuration
   *
   * @param conf The configuration which contains the shared files
   * @return A URI array of the shared files set in the Configuration
   * @throws IOException
   */
  public static URI[] getSharedCacheFiles(Configuration conf) throws IOException {
    return StringUtils.stringToURI(conf.getStrings("mapred.cache.shared.files"));
  }

  /**
   * Return the path array of the localized caches
   *
   * @param conf Configuration that contains the localized archives
   * @return A path array of localized caches
   * @throws IOException
   */
  public static Path[] getLocalCacheArchives(Configuration conf) throws IOException {
    return StringUtils.stringToPath(conf.getStrings("mapred.cache.localArchives"));
  }

  /**
   * Return the path array of the localized shared caches
   *
   * @param conf Configuration that contains the localized shared archives
   * @return A path array of localized shared caches
   * @throws IOException
   */
  public static Path[] getLocalSharedCacheArchives(Configuration conf) throws IOException {
    return StringUtils.stringToPath(conf.getStrings("mapred.cache.shared.localArchives"));
  }

  /**
   * Return the path array of the localized files
   *
   * @param conf Configuration that contains the localized files
   * @return A path array of localized files
   * @throws IOException
   */
  public static Path[] getLocalCacheFiles(Configuration conf) throws IOException {
    return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles"));
  }

  /**
   * Return the path array of the localized shared files
   *
   * @param conf Configuration that contains the localized shared files
   * @return A path array of localized shared files
   * @throws IOException
   */
  public static Path[] getLocalSharedCacheFiles(Configuration conf) throws IOException {
    return StringUtils.stringToPath(conf.getStrings("mapred.cache.shared.localFiles"));
  }

  /**
   * Get the timestamps of the archives
   *
   * @param conf The configuration which stored the timestamps
   * @return a string array of timestamps
   */
  public static String[] getArchiveTimestamps(Configuration conf) {
    return conf.getStrings("mapred.cache.archives.timestamps");
  }

  /**
   * Get the timestamps of the files
   *
   * @param conf The configuration which stored the timestamps
   * @return a string array of timestamps
   */
  public static String[] getFileTimestamps(Configuration conf) {
    return conf.getStrings("mapred.cache.files.timestamps");
  }

  public static String[] getSharedArchiveLength(Configuration conf) {
    return conf.getStrings("mapred.cache.shared.archives.length");
  }

  public static String[] getSharedFileLength(Configuration conf) {
    return conf.getStrings("mapred.cache.shared.files.length");
  }

  /**
   * This is to set the timestamps of the archives to be localized
   *
   * @param conf Configuration which stores the timestamps
   * @param timestamps comma separated list of timestamps of archives. The order should be the same
   *     as the order in which the archives are added.
   */
  public static void setArchiveTimestamps(Configuration conf, String timestamps) {
    conf.set("mapred.cache.archives.timestamps", timestamps);
  }

  public static void setSharedArchiveLength(Configuration conf, String length) {
    conf.set("mapred.cache.shared.archives.length", length);
  }

  /**
   * This is to set the timestamps of the files to be localized
   *
   * @param conf Configuration which stores the timestamps
   * @param timestamps comma separated list of timestamps of files. The order should be the same as
   *     the order in which the files are added.
   */
  public static void setFileTimestamps(Configuration conf, String timestamps) {
    conf.set("mapred.cache.files.timestamps", timestamps);
  }

  public static void setSharedFileLength(Configuration conf, String length) {
    conf.set("mapred.cache.shared.files.length", length);
  }

  /**
   * Set the conf to contain the location for localized archives
   *
   * @param conf The conf to modify to contain the localized caches
   * @param str a comma separated list of local archives
   */
  public static void setLocalArchives(Configuration conf, String str) {
    conf.set("mapred.cache.localArchives", str);
  }

  /**
   * Set the conf to contain the location for localized shared archives
   *
   * @param conf The conf to modify to contain the localized shared caches
   * @param str a comma separated list of local shared archives
   */
  public static void setLocalSharedArchives(Configuration conf, String str) {
    conf.set("mapred.cache.shared.localArchives", str);
  }

  /**
   * Set the conf to contain the location for localized files
   *
   * @param conf The conf to modify to contain the localized caches
   * @param str a comma separated list of local files
   */
  public static void setLocalFiles(Configuration conf, String str) {
    conf.set("mapred.cache.localFiles", str);
  }

  /**
   * Set the conf to contain the location for localized shared files
   *
   * @param conf The conf to modify to contain the localized shared caches
   * @param str a comma separated list of local shared files
   */
  public static void setLocalSharedFiles(Configuration conf, String str) {
    conf.set("mapred.cache.shared.localFiles", str);
  }

  /**
   * Add an archive to be localized to the conf
   *
   * @param uri The uri of the cache to be localized
   * @param conf Configuration to add the cache to
   */
  public static void addCacheArchive(URI uri, Configuration conf) {
    String archives = conf.get("mapred.cache.archives");
    conf.set(
        "mapred.cache.archives",
        archives == null ? uri.toString() : archives + "," + uri.toString());
  }

  /**
   * Add a shared archive to be localized to the conf
   *
   * @param uri The uri of the cache to be localized
   * @param conf Configuration to add the cache to
   */
  public static void addSharedCacheArchive(URI uri, Configuration conf) {
    String archives = conf.get("mapred.cache.shared.archives");
    conf.set(
        "mapred.cache.shared.archives",
        archives == null ? uri.toString() : archives + "," + uri.toString());
  }

  /**
   * Add a file to be localized to the conf
   *
   * @param uri The uri of the cache to be localized
   * @param conf Configuration to add the cache to
   */
  public static void addCacheFile(URI uri, Configuration conf) {
    String files = conf.get("mapred.cache.files");
    conf.set("mapred.cache.files", files == null ? uri.toString() : files + "," + uri.toString());
  }

  /**
   * Add a shared file to be localized to the conf
   *
   * @param uri The uri of the cache to be localized
   * @param conf Configuration to add the cache to
   */
  public static void addSharedCacheFile(URI uri, Configuration conf) {
    String files = conf.get("mapred.cache.shared.files");
    conf.set(
        "mapred.cache.shared.files", files == null ? uri.toString() : files + "," + uri.toString());
  }

  /**
   * Add a file path to the current set of classpath entries. It adds the file to the cache as well.
   *
   * @param file Path of the file to be added
   * @param conf Configuration that contains the classpath setting
   */
  public static void addFileToClassPath(Path file, Configuration conf) throws IOException {
    String classpath = conf.get("mapred.job.classpath.files");
    conf.set(
        "mapred.job.classpath.files",
        classpath == null
            ? file.toString()
            : classpath + System.getProperty("path.separator") + file.toString());
    URI uri = file.makeQualified(file.getFileSystem(conf)).toUri();

    addCacheFile(uri, conf);
  }
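
  // Sketch (hypothetical path): registering a jar so that it is both localized
  // through the distributed cache and appended to the task classpath.
  private static void exampleAddJarToClassPath(Configuration conf) throws IOException {
    Path jar = new Path("hdfs://namenode:8020/libs/parsers.jar");
    addFileToClassPath(jar, conf);
    // "mapred.job.classpath.files" now ends with the jar path and
    // "mapred.cache.files" contains its fully qualified URI
  }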

  /**
   * Get the file entries in classpath as an array of Path
   *
   * @param conf Configuration that contains the classpath setting
   */
  public static Path[] getFileClassPaths(Configuration conf) {
    String classpath = conf.get("mapred.job.classpath.files");
    if (classpath == null) return null;
    ArrayList list =
        Collections.list(new StringTokenizer(classpath, System.getProperty("path.separator")));
    Path[] paths = new Path[list.size()];
    for (int i = 0; i < list.size(); i++) {
      paths[i] = new Path((String) list.get(i));
    }
    return paths;
  }

  private static URI addArchiveToClassPathHelper(Path archive, Configuration conf)
      throws IOException {

    String classpath = conf.get("mapred.job.classpath.archives");

    // the scheme/authority use ':' as separator. put the unqualified path in classpath
    String archivePath = archive.toUri().getPath();

    conf.set(
        "mapred.job.classpath.archives",
        classpath == null
            ? archivePath
            : classpath + System.getProperty("path.separator") + archivePath);
    return archive.makeQualified(archive.getFileSystem(conf)).toUri();
  }

  /**
   * Add an archive path to the current set of classpath entries. It adds the archive to cache as
   * well.
   *
   * @param archive Path of the archive to be added
   * @param conf Configuration that contains the classpath setting
   */
  public static void addArchiveToClassPath(Path archive, Configuration conf) throws IOException {
    URI uri = addArchiveToClassPathHelper(archive, conf);
    addCacheArchive(uri, conf);
  }

  /**
   * Add an archive path to the current set of classpath entries. It adds the archive to the
   * shared cache as well.
   *
   * @param archive Path of the archive to be added
   * @param conf Configuration that contains the classpath setting
   */
  public static void addSharedArchiveToClassPath(Path archive, Configuration conf)
      throws IOException {
    URI uri = addArchiveToClassPathHelper(archive, conf);
    addSharedCacheArchive(uri, conf);
  }

  /**
   * Get the archive entries in classpath as an array of Path
   *
   * @param conf Configuration that contains the classpath setting
   */
  public static Path[] getArchiveClassPaths(Configuration conf) {
    String classpath = conf.get("mapred.job.classpath.archives");
    if (classpath == null) return null;
    ArrayList list =
        Collections.list(new StringTokenizer(classpath, System.getProperty("path.separator")));
    Path[] paths = new Path[list.size()];
    for (int i = 0; i < list.size(); i++) {
      paths[i] = new Path((String) list.get(i));
    }
    return paths;
  }

  /**
   * This method allows you to create symlinks in the current working directory of the task to all
   * the cache files/archives
   *
   * @param conf the jobconf
   */
  public static void createSymlink(Configuration conf) {
    conf.set("mapred.create.symlink", "yes");
  }

  /**
   * This method checks to see if symlinks are to be created for the localized cache files in the
   * current working directory
   *
   * @param conf the jobconf
   * @return true if symlinks are to be created, false otherwise
   */
  public static boolean getSymlink(Configuration conf) {
    String result = conf.get("mapred.create.symlink");
    if ("yes".equals(result)) {
      return true;
    }
    return false;
  }

  /**
   * This method checks if there is a conflict in the fragment names of the uris. Also makes sure
   * that each uri has a fragment. It is only to be called if you want to create symlinks for the
   * various archives and files.
   *
   * @param uriFiles The uri array of urifiles
   * @param uriArchives the uri array of uri archives
   */
  public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) {
    if ((uriFiles == null) && (uriArchives == null)) {
      return true;
    }
    if (uriFiles != null) {
      for (int i = 0; i < uriFiles.length; i++) {
        String frag1 = uriFiles[i].getFragment();
        if (frag1 == null) return false;
        for (int j = i + 1; j < uriFiles.length; j++) {
          String frag2 = uriFiles[j].getFragment();
          if (frag2 == null) return false;
          if (frag1.equalsIgnoreCase(frag2)) return false;
        }
        if (uriArchives != null) {
          for (int j = 0; j < uriArchives.length; j++) {
            String frag2 = uriArchives[j].getFragment();
            if (frag2 == null) {
              return false;
            }
            if (frag1.equalsIgnoreCase(frag2)) return false;
            for (int k = j + 1; k < uriArchives.length; k++) {
              String frag3 = uriArchives[k].getFragment();
              if (frag3 == null) return false;
              if (frag2.equalsIgnoreCase(frag3)) return false;
            }
          }
        }
      }
    }
    return true;
  }
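
  // Sketch: symlinks are only safe to create when every URI carries a fragment
  // (the part after '#', used as the link name) and all fragments are distinct.
  // The URIs below are hypothetical.
  private static boolean exampleCheckFragments() {
    URI[] files = {
      URI.create("hdfs://namenode:8020/data/a.txt#first"),
      URI.create("hdfs://namenode:8020/data/b.txt#second")
    };
    URI[] archives = {URI.create("hdfs://namenode:8020/libs/lib.zip#libs")};
    return checkURIs(files, archives); // true: all fragments present and unique
  }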

  private static class CacheStatus {
    // the local load path of this cache
    Path localizedLoadPath;

    // the base dir where the cache lies
    Path localizedBaseDir;

    // the unique directory in localizedBaseDir, where the cache lies
    Path uniqueParentDir;

    // the size of this cache
    long size;

    // number of instances using this cache
    int refcount;

    // the cache-file modification time
    long mtime;

    // is it initialized
    boolean inited = false;

    public CacheStatus(Path baseDir, Path localLoadPath, Path uniqueParentDir) {
      super();
      this.localizedLoadPath = localLoadPath;
      this.refcount = 0;
      this.mtime = -1;
      this.localizedBaseDir = baseDir;
      this.size = 0;
      this.uniqueParentDir = uniqueParentDir;
    }

    // get the base dir for the cache
    Path getBaseDir() {
      return localizedBaseDir;
    }

    // Is it initialized?
    boolean isInited() {
      return inited;
    }

    // mark it as initialized
    void initComplete() {
      inited = true;
    }
  }

  /**
   * Clear the entire contents of the cache and delete the backing files. This should only be used
   * when the server is reinitializing, because the users are going to lose their files.
   */
  public static void purgeCache(Configuration conf, MRAsyncDiskService service) throws IOException {
    synchronized (cachedArchives) {
      LocalFileSystem localFs = FileSystem.getLocal(conf);
      for (Map.Entry<String, CacheStatus> f : cachedArchives.entrySet()) {
        try {
          deleteLocalPath(service, localFs, f.getValue().localizedLoadPath);
        } catch (IOException ie) {
          LOG.debug("Error cleaning up cache", ie);
        }
      }
      cachedArchives.clear();
    }
  }

  /**
   * Update the maps baseDirSize and baseDirNumberSubDir when deleting cache.
   *
   * @param cacheStatus cache status of the cache being deleted
   */
  private static void deleteCacheInfoUpdate(CacheStatus cacheStatus) {
    if (!cacheStatus.isInited()) {
      // if it is not created yet, do nothing.
      return;
    }
    synchronized (baseDirSize) {
      Long dirSize = baseDirSize.get(cacheStatus.getBaseDir());
      if (dirSize != null) {
        dirSize -= cacheStatus.size;
        baseDirSize.put(cacheStatus.getBaseDir(), dirSize);
      }
    }
    synchronized (baseDirNumberSubDir) {
      Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir());
      if (dirSubDir != null) {
        dirSubDir--;
        baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir);
      }
    }
  }

  /**
   * Update the maps baseDirSize and baseDirNumberSubDir when adding cache.
   *
   * @param cacheStatus cache status of the cache being added
   */
  private static void addCacheInfoUpdate(CacheStatus cacheStatus) {
    long cacheSize = cacheStatus.size;
    synchronized (baseDirSize) {
      Long dirSize = baseDirSize.get(cacheStatus.getBaseDir());
      if (dirSize == null) {
        dirSize = Long.valueOf(cacheSize);
      } else {
        dirSize += cacheSize;
      }
      baseDirSize.put(cacheStatus.getBaseDir(), dirSize);
    }
    synchronized (baseDirNumberSubDir) {
      Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir());
      if (dirSubDir == null) {
        dirSubDir = 1;
      } else {
        dirSubDir += 1;
      }
      baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir);
    }
  }
}
/**
 * Shared functionality for hadoopStreaming formats. A custom reader can be defined as a
 * RecordReader with the constructor below and is selected with the option bin/hadoopStreaming
 * -inputreader ... (an illustrative subclass sketch follows this class).
 *
 * @see StreamXmlRecordReader
 */
public abstract class StreamBaseRecordReader implements RecordReader<Text, Text> {

  protected static final Log LOG = LogFactory.getLog(StreamBaseRecordReader.class.getName());

  // custom JobConf properties for this class are prefixed with this namespace
  static final String CONF_NS = "stream.recordreader.";

  public StreamBaseRecordReader(
      FSDataInputStream in, FileSplit split, Reporter reporter, JobConf job, FileSystem fs)
      throws IOException {
    in_ = in;
    split_ = split;
    start_ = split_.getStart();
    length_ = split_.getLength();
    end_ = start_ + length_;
    splitName_ = split_.getPath().getName();
    reporter_ = reporter;
    job_ = job;
    fs_ = fs;

    statusMaxRecordChars_ = job_.getInt(CONF_NS + "statuschars", 200);
  }

  /// RecordReader API

  /** Read a record. Implementation should call numRecStats at the end */
  public abstract boolean next(Text key, Text value) throws IOException;

  /** This implementation performs no validation; all input is considered valid. */
  public void validateInput(JobConf job) throws IOException {}

  /** Returns the current position in the input. */
  public synchronized long getPos() throws IOException {
    return in_.getPos();
  }

  /** Close this reader to future operations. */
  public synchronized void close() throws IOException {
    in_.close();
  }

  public float getProgress() throws IOException {
    if (end_ == start_) {
      return 1.0f;
    } else {
      return ((float) (in_.getPos() - start_)) / ((float) (end_ - start_));
    }
  }

  public Text createKey() {
    return new Text();
  }

  public Text createValue() {
    return new Text();
  }

  /// StreamBaseRecordReader API

  /**
   * Implementation should seek forward in_ to the first byte of the next record. The initial byte
   * offset in the stream is arbitrary.
   */
  public abstract void seekNextRecordBoundary() throws IOException;

  void numRecStats(byte[] record, int start, int len) throws IOException {
    numRec_++;
    if (numRec_ == nextStatusRec_) {
      String recordStr = new String(record, start, Math.min(len, statusMaxRecordChars_), "UTF-8");
      nextStatusRec_ += 100; // *= 10;
      String status = getStatus(recordStr);
      LOG.info(status);
      reporter_.setStatus(status);
    }
  }

  long lastMem = 0;

  String getStatus(CharSequence record) {
    long pos = -1;
    try {
      pos = getPos();
    } catch (IOException io) {
      // ignore: the position is reported as -1 if it cannot be read
    }
    String recStr;
    if (record.length() > statusMaxRecordChars_) {
      recStr = record.subSequence(0, statusMaxRecordChars_) + "...";
    } else {
      recStr = record.toString();
    }
    String unqualSplit =
        split_.getPath().getName() + ":" + split_.getStart() + "+" + split_.getLength();
    String status =
        "HSTR "
            + StreamUtil.HOST
            + " "
            + numRec_
            + ". pos="
            + pos
            + " "
            + unqualSplit
            + " Processing record="
            + recStr;
    status += " " + splitName_;
    return status;
  }

  FSDataInputStream in_;
  FileSplit split_;
  long start_;
  long end_;
  long length_;
  String splitName_;
  Reporter reporter_;
  JobConf job_;
  FileSystem fs_;
  int numRec_ = 0;
  int nextStatusRec_ = 1;
  int statusMaxRecordChars_;
}
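
// Illustrative sketch only (not part of the framework): a minimal concrete reader
// that treats each '\n'-terminated line as one record keyed by its byte offset.
// The class name is hypothetical and the line handling is deliberately simplistic;
// a production reader (e.g. StreamXmlRecordReader) must handle split boundaries
// and encodings more carefully.
class ExampleLineRecordReader extends StreamBaseRecordReader {

  public ExampleLineRecordReader(
      FSDataInputStream in, FileSplit split, Reporter reporter, JobConf job, FileSystem fs)
      throws IOException {
    super(in, split, reporter, job, fs);
  }

  /** A line-oriented reader can simply start reading at the split offset. */
  public void seekNextRecordBoundary() throws IOException {
    in_.seek(start_);
  }

  public synchronized boolean next(Text key, Text value) throws IOException {
    long pos = in_.getPos();
    if (pos >= end_) {
      return false; // past the end of this split
    }
    String line = in_.readLine(); // deprecated, but adequate for a sketch
    if (line == null) {
      return false; // end of stream
    }
    key.set(Long.toString(pos));
    value.set(line);
    byte[] bytes = line.getBytes("UTF-8");
    numRecStats(bytes, 0, bytes.length); // required by the contract of next()
    return true;
  }
}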
  private static class AllocatorPerContext {

    private final Log LOG = LogFactory.getLog(AllocatorPerContext.class);

    private int dirNumLastAccessed;
    private Random dirIndexRandomizer = new Random();
    private FileSystem localFS;
    private DF[] dirDF;
    private String contextCfgItemName;
    private Path[] localDirsPath;
    private String savedLocalDirs = "";

    public AllocatorPerContext(String contextCfgItemName) {
      this.contextCfgItemName = contextCfgItemName;
    }

    /**
     * This method gets called every time before any read/write to make sure that any change to
     * localDirs is reflected immediately.
     */
    private synchronized void confChanged(Configuration conf) throws IOException {
      String newLocalDirs = conf.get(contextCfgItemName);
      if (!newLocalDirs.equals(savedLocalDirs)) {
        String[] localDirs = conf.getStrings(contextCfgItemName);
        localFS = FileSystem.getLocal(conf);
        int numDirs = localDirs.length;
        ArrayList<String> dirs = new ArrayList<String>(numDirs);
        ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
        for (int i = 0; i < numDirs; i++) {
          try {
            // filter problematic directories
            Path tmpDir = new Path(localDirs[i]);
            if (localFS.mkdirs(tmpDir) || localFS.exists(tmpDir)) {
              try {
                DiskChecker.checkDir(new File(localDirs[i]));
                dirs.add(localDirs[i]);
                dfList.add(new DF(new File(localDirs[i]), 30000));
              } catch (DiskErrorException de) {
                LOG.warn(localDirs[i] + "is not writable\n" + StringUtils.stringifyException(de));
              }
            } else {
              LOG.warn("Failed to create " + localDirs[i]);
            }
          } catch (IOException ie) {
            LOG.warn(
                "Failed to create "
                    + localDirs[i]
                    + ": "
                    + ie.getMessage()
                    + "\n"
                    + StringUtils.stringifyException(ie));
          } // ignore
        }
        localDirsPath = new Path[dirs.size()];
        for (int i = 0; i < localDirsPath.length; i++) {
          localDirsPath[i] = new Path(dirs.get(i));
        }
        dirDF = dfList.toArray(new DF[dirs.size()]);
        savedLocalDirs = newLocalDirs;

        // randomize the first disk picked in the round-robin selection
        dirNumLastAccessed = dirIndexRandomizer.nextInt(dirs.size());
      }
    }

    private Path createPath(Path path, boolean checkWrite) throws IOException {
      Path file = new Path(localDirsPath[dirNumLastAccessed], path);

      if (checkWrite) {
        // check whether we are able to create a directory here. If the disk
        // happens to be RDONLY we will fail
        try {
          DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
        } catch (DiskErrorException d) {
          LOG.warn(StringUtils.stringifyException(d));
          return null;
        }
      }
      return file;
    }

    /**
     * Get the current directory index.
     *
     * @return the current directory index.
     */
    int getCurrentDirectoryIndex() {
      return dirNumLastAccessed;
    }

    /**
     * Get a path from the local FS. If size is known, we go round-robin over the set of disks (via
     * the configured dirs) and return the first complete path which has enough space.
     *
     * <p>If size is not known, use roulette selection -- pick directories with probability
     * proportional to their available space.
     */
    public synchronized Path getLocalPathForWrite(
        String pathStr, long size, Configuration conf, boolean checkWrite) throws IOException {
      confChanged(conf);
      int numDirs = localDirsPath.length;
      int numDirsSearched = 0;
      // remove the leading slash from the path (to make sure that the uri
      // resolution results in a valid path on the dir being checked)
      if (pathStr.startsWith("/")) {
        pathStr = pathStr.substring(1);
      }
      Path returnPath = null;
      Path path = new Path(pathStr);

      if (size == SIZE_UNKNOWN) { // do roulette selection: pick dir with probability
        // proportional to available size
        long[] availableOnDisk = new long[dirDF.length];
        long totalAvailable = 0;

        // build the "roulette wheel"
        for (int i = 0; i < dirDF.length; ++i) {
          availableOnDisk[i] = dirDF[i].getAvailable();
          totalAvailable += availableOnDisk[i];
        }

        // Keep rolling the wheel till we get a valid path
        Random r = new java.util.Random();
        while (numDirsSearched < numDirs && returnPath == null) {
          long randomPosition = Math.abs(r.nextLong()) % totalAvailable;
          int dir = 0;
          while (randomPosition > availableOnDisk[dir]) {
            randomPosition -= availableOnDisk[dir];
            dir++;
          }
          dirNumLastAccessed = dir;
          returnPath = createPath(path, checkWrite);
          if (returnPath == null) {
            totalAvailable -= availableOnDisk[dir];
            availableOnDisk[dir] = 0; // skip this disk
            numDirsSearched++;
          }
        }
      } else {
        while (numDirsSearched < numDirs && returnPath == null) {
          long capacity = dirDF[dirNumLastAccessed].getAvailable();
          if (capacity > size) {
            returnPath = createPath(path, checkWrite);
          }
          dirNumLastAccessed++;
          dirNumLastAccessed = dirNumLastAccessed % numDirs;
          numDirsSearched++;
        }
      }
      if (returnPath != null) {
        return returnPath;
      }

      // no path found
      throw new DiskErrorException("Could not find any valid local " + "directory for " + pathStr);
    }

    /**
     * Creates a file on the local FS. Pass size as {@link LocalDirAllocator#SIZE_UNKNOWN} if not
     * known a priori. We round-robin over the set of disks (via the configured dirs) and return a
     * file on the first path which has enough space. The file is guaranteed to go away when the JVM
     * exits.
     */
    public File createTmpFileForWrite(String pathStr, long size, Configuration conf)
        throws IOException {

      // find an appropriate directory
      Path path = getLocalPathForWrite(pathStr, size, conf, true);
      File dir = new File(path.getParent().toUri().getPath());
      String prefix = path.getName();

      // create a temp file on this directory
      File result = File.createTempFile(prefix, null, dir);
      result.deleteOnExit();
      return result;
    }

    /**
     * Get a path from the local FS for reading. We search through all the configured dirs for the
     * file's existence and return the complete path to the file when we find one
     */
    public synchronized Path getLocalPathToRead(String pathStr, Configuration conf)
        throws IOException {
      confChanged(conf);
      int numDirs = localDirsPath.length;
      int numDirsSearched = 0;
      // remove the leading slash from the path (to make sure that the uri
      // resolution results in a valid path on the dir being checked)
      if (pathStr.startsWith("/")) {
        pathStr = pathStr.substring(1);
      }
      Path childPath = new Path(pathStr);
      while (numDirsSearched < numDirs) {
        Path file = new Path(localDirsPath[numDirsSearched], childPath);
        if (localFS.exists(file)) {
          return file;
        }
        numDirsSearched++;
      }

      // no path found
      throw new DiskErrorException(
          "Could not find " + pathStr + " in any of" + " the configured local directories");
    }
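
    // Illustrative sketch, not invoked anywhere: writing a scratch file through
    // this allocator and locating it again later. The relative path is hypothetical.
    private synchronized Path exampleWriteThenRead(Configuration conf) throws IOException {
      String rel = "jobcache/job_0001/attempt_0001_m_000000_0/spill0.out";
      // pick a directory with at least 64 MB free, going round-robin over the disks
      Path writable = getLocalPathForWrite(rel, 64L * 1024 * 1024, conf, true);
      LOG.info("Would write scratch data to " + writable);
      // later, find the same file again by searching every configured directory
      return getLocalPathToRead(rel, conf);
    }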

    private static class PathIterator implements Iterator<Path>, Iterable<Path> {
      private final FileSystem fs;
      private final String pathStr;
      private int i = 0;
      private final Path[] rootDirs;
      private Path next = null;

      private PathIterator(FileSystem fs, String pathStr, Path[] rootDirs) throws IOException {
        this.fs = fs;
        this.pathStr = pathStr;
        this.rootDirs = rootDirs;
        advance();
      }

      @Override
      public boolean hasNext() {
        return next != null;
      }

      private void advance() throws IOException {
        while (i < rootDirs.length) {
          next = new Path(rootDirs[i++], pathStr);
          if (fs.exists(next)) {
            return;
          }
        }
        next = null;
      }

      @Override
      public Path next() {
        Path result = next;
        try {
          advance();
        } catch (IOException ie) {
          throw new RuntimeException("Can't check existance of " + next, ie);
        }
        return result;
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException("read only iterator");
      }

      @Override
      public Iterator<Path> iterator() {
        return this;
      }
    }

    /**
     * Get all of the paths that currently exist in the working directories.
     *
     * @param pathStr the path underneath the roots
     * @param conf the configuration to look up the roots in
     * @return all of the paths that exist under any of the roots
     * @throws IOException
     */
    synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr, Configuration conf)
        throws IOException {
      confChanged(conf);
      if (pathStr.startsWith("/")) {
        pathStr = pathStr.substring(1);
      }
      return new PathIterator(localFS, pathStr, localDirsPath);
    }

    /**
     * We search through all the configured dirs for the file's existence and return true when we
     * find one
     */
    public synchronized boolean ifExists(String pathStr, Configuration conf) {
      try {
        int numDirs = localDirsPath.length;
        int numDirsSearched = 0;
        // remove the leading slash from the path (to make sure that the uri
        // resolution results in a valid path on the dir being checked)
        if (pathStr.startsWith("/")) {
          pathStr = pathStr.substring(1);
        }
        Path childPath = new Path(pathStr);
        while (numDirsSearched < numDirs) {
          Path file = new Path(localDirsPath[numDirsSearched], childPath);
          if (localFS.exists(file)) {
            return true;
          }
          numDirsSearched++;
        }
      } catch (IOException e) {
        // IGNORE and try again
      }
      return false;
    }
  }
/**
 * To automatically generate reports from list mode.
 *
 * <p>Uses JasperReports.
 *
 * @author Javier Paniza
 */
public class GenerateReportServlet extends HttpServlet {

  private static Log log = LogFactory.getLog(GenerateReportServlet.class);

  public static class TableModelDecorator implements TableModel {

    private TableModel original;
    private List metaProperties;
    private boolean withValidValues = false;
    private Locale locale;
    private boolean labelAsHeader = false;
    private HttpServletRequest request;
    private boolean format =
        false; // whether to format the values; if true, all values sent to the report are Strings
    private Integer columnCountLimit;

    public TableModelDecorator(
        HttpServletRequest request,
        TableModel original,
        List metaProperties,
        Locale locale,
        boolean labelAsHeader,
        boolean format,
        Integer columnCountLimit)
        throws Exception {
      this.request = request;
      this.original = original;
      this.metaProperties = metaProperties;
      this.locale = locale;
      this.withValidValues = calculateWithValidValues();
      this.labelAsHeader = labelAsHeader;
      this.format = format;
      this.columnCountLimit = columnCountLimit;
    }

    private boolean calculateWithValidValues() {
      Iterator it = metaProperties.iterator();
      while (it.hasNext()) {
        MetaProperty m = (MetaProperty) it.next();
        if (m.hasValidValues()) return true;
      }
      return false;
    }

    private MetaProperty getMetaProperty(int i) {
      return (MetaProperty) metaProperties.get(i);
    }

    public int getRowCount() {
      return original.getRowCount();
    }

    public int getColumnCount() {
      return columnCountLimit == null ? original.getColumnCount() : columnCountLimit;
    }

    public String getColumnName(int c) {
      return labelAsHeader
          ? getMetaProperty(c).getLabel(locale)
          : Strings.change(getMetaProperty(c).getQualifiedName(), ".", "_");
    }

    public Class getColumnClass(int c) {
      return original.getColumnClass(c);
    }

    public boolean isCellEditable(int row, int column) {
      return original.isCellEditable(row, column);
    }

    public Object getValueAt(int row, int column) {
      if (isFormat()) return getValueWithWebEditorsFormat(row, column);
      else return getValueWithoutWebEditorsFormat(row, column);
    }

    private Object getValueWithoutWebEditorsFormat(int row, int column) {
      Object r = original.getValueAt(row, column);

      if (r instanceof Boolean) {
        if (((Boolean) r).booleanValue()) return XavaResources.getString(locale, "yes");
        return XavaResources.getString(locale, "no");
      }
      if (withValidValues) {
        MetaProperty p = getMetaProperty(column);
        if (p.hasValidValues()) {
          return p.getValidValueLabel(locale, original.getValueAt(row, column));
        }
      }

      if (r instanceof java.util.Date) {
        MetaProperty p =
            getMetaProperty(column); // In order to use the type declared by the developer
        // and not the one returned by JDBC or the JPA engine
        if (java.sql.Time.class.isAssignableFrom(p.getType())) {
          return DateFormat.getTimeInstance(DateFormat.SHORT, locale).format(r);
        }
        if (java.sql.Timestamp.class.isAssignableFrom(p.getType())) {
          DateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm:ss");
          return dateFormat.format(r);
        }
        return DateFormat.getDateInstance(DateFormat.SHORT, locale).format(r);
      }

      if (r instanceof BigDecimal) {
        return formatBigDecimal(r, locale);
      }

      return r;
    }

    private Object getValueWithWebEditorsFormat(int row, int column) {
      Object r = original.getValueAt(row, column);
      MetaProperty metaProperty = getMetaProperty(column);
      String result = WebEditors.format(this.request, metaProperty, r, null, "", true);
      if (isHtml(result)) { // this prevents the report from showing raw html content
        result = WebEditors.format(this.request, metaProperty, r, null, "", false);
      }
      return result;
    }

    public void setValueAt(Object value, int row, int column) {
      original.setValueAt(value, row, column);
    }

    public void addTableModelListener(TableModelListener l) {
      original.addTableModelListener(l);
    }

    public void removeTableModelListener(TableModelListener l) {
      original.removeTableModelListener(l);
    }

    private boolean isHtml(String value) {
      return value.matches("<.*>");
    }

    public boolean isFormat() {
      return format;
    }

    public void setFormat(boolean format) {
      this.format = format;
    }
  }
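
  /*
   * Usage sketch (it mirrors what getTableModel/doGet below do; the values are
   * hypothetical): the decorator makes a tab's TableModel report-friendly by
   * formatting values and optionally using labels as column headers.
   *
   *   TableModel report = new TableModelDecorator(
   *       request, tab.getAllDataTableModel(), tab.getMetaProperties(),
   *       Locales.getCurrent(), true, true, null);
   *   JRDataSource ds = new JRTableModelDataSource(report);
   */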

  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    try {
      Locales.setCurrent(request);
      if (Users.getCurrent() == null) { // for a bug in websphere portal 5.1 with Domino LDAP
        Users.setCurrent((String) request.getSession().getAttribute("xava.user"));
      }
      request.getParameter("application"); // for a bug in websphere 5.1
      request.getParameter("module"); // for a bug in websphere 5.1
      Tab tab = (Tab) request.getSession().getAttribute("xava_reportTab");
      int[] selectedRowsNumber =
          (int[]) request.getSession().getAttribute("xava_selectedRowsReportTab");
      Map[] selectedKeys = (Map[]) request.getSession().getAttribute("xava_selectedKeysReportTab");
      int[] selectedRows = getSelectedRows(selectedRowsNumber, selectedKeys, tab);
      request.getSession().removeAttribute("xava_selectedRowsReportTab");
      Integer columnCountLimit =
          (Integer) request.getSession().getAttribute("xava_columnCountLimitReportTab");
      request.getSession().removeAttribute("xava_columnCountLimitReportTab");

      setDefaultSchema(request);
      String user = (String) request.getSession().getAttribute("xava_user");
      request.getSession().removeAttribute("xava_user");
      Users.setCurrent(user);
      String uri = request.getRequestURI();
      if (uri.endsWith(".pdf")) {
        InputStream is;
        JRDataSource ds;
        Map parameters = new HashMap();
        synchronized (tab) {
          tab.setRequest(request);
          parameters.put("Title", tab.getTitle());
          parameters.put("Organization", getOrganization());
          parameters.put("Date", getCurrentDate());
          for (String totalProperty : tab.getTotalPropertiesNames()) {
            parameters.put(totalProperty + "__TOTAL__", getTotal(request, tab, totalProperty));
          }
          TableModel tableModel = getTableModel(request, tab, selectedRows, false, true, null);
          tableModel.getValueAt(0, 0);
          if (tableModel.getRowCount() == 0) {
            generateNoRowsPage(response);
            return;
          }
          is = getReport(request, response, tab, tableModel, columnCountLimit);
          ds = new JRTableModelDataSource(tableModel);
        }
        JasperPrint jprint = JasperFillManager.fillReport(is, parameters, ds);
        response.setContentType("application/pdf");
        response.setHeader(
            "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".pdf\"");
        JasperExportManager.exportReportToPdfStream(jprint, response.getOutputStream());
      } else if (uri.endsWith(".csv")) {
        String csvEncoding = XavaPreferences.getInstance().getCSVEncoding();
        if (!Is.emptyString(csvEncoding)) {
          response.setCharacterEncoding(csvEncoding);
        }
        response.setContentType("text/x-csv");
        response.setHeader(
            "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".csv\"");
        synchronized (tab) {
          tab.setRequest(request);
          response
              .getWriter()
              .print(
                  TableModels.toCSV(
                      getTableModel(request, tab, selectedRows, true, false, columnCountLimit)));
        }
      } else {
        throw new ServletException(
            XavaResources.getString("report_type_not_supported", "", ".pdf .csv"));
      }
    } catch (Exception ex) {
      log.error(ex.getMessage(), ex);
      throw new ServletException(XavaResources.getString("report_error"));
    } finally {
      request.getSession().removeAttribute("xava_reportTab");
    }
  }

  private void generateNoRowsPage(HttpServletResponse response) throws Exception {
    response.setContentType("text/html");
    response.getWriter().println("<html><head><title>");
    response.getWriter().println(XavaResources.getString("no_rows_report_message_title"));
    response
        .getWriter()
        .println(
            "</title></head><body style='font-family:Tahoma,Arial,sans-serif;color:black;background-color:white;'>");
    response.getWriter().println("<h1 style='font-size:22px;'>");
    response.getWriter().println(XavaResources.getString("no_rows_report_message_title"));
    response.getWriter().println("</h1>");
    response.getWriter().println("<p style='font-size:16px;'>");
    response.getWriter().println(XavaResources.getString("no_rows_report_message_detail"));
    response.getWriter().println("</p></body></html>");
  }

  private String getCurrentDate() {
    return java.text.DateFormat.getDateInstance(DateFormat.MEDIUM, Locales.getCurrent())
        .format(new java.util.Date());
  }

  private String getFileName(Tab tab) {
    String now = new SimpleDateFormat("yyyyMMdd_HHmm").format(new Date());
    return tab.getTitle() + " " + now;
  }

  private Object getTotal(HttpServletRequest request, Tab tab, String totalProperty) {
    Object total = tab.getTotal(totalProperty);
    return WebEditors.format(
        request, tab.getMetaProperty(totalProperty), total, new Messages(), null, true);
  }

  private void setDefaultSchema(HttpServletRequest request) {
    String hibernateDefaultSchemaTab =
        (String) request.getSession().getAttribute("xava_hibernateDefaultSchemaTab");
    if (hibernateDefaultSchemaTab != null) {
      request.getSession().removeAttribute("xava_hibernateDefaultSchemaTab");
      XHibernate.setDefaultSchema(hibernateDefaultSchemaTab);
    }
    String jpaDefaultSchemaTab =
        (String) request.getSession().getAttribute("xava_jpaDefaultSchemaTab");
    if (jpaDefaultSchemaTab != null) {
      request.getSession().removeAttribute("xava_jpaDefaultSchemaTab");
      XPersistence.setDefaultSchema(jpaDefaultSchemaTab);
    }
  }

  protected String getOrganization() throws MissingResourceException, XavaException {
    return ReportParametersProviderFactory.getInstance().getOrganization();
  }

  private InputStream getReport(
      HttpServletRequest request,
      HttpServletResponse response,
      Tab tab,
      TableModel tableModel,
      Integer columnCountLimit)
      throws ServletException, IOException {
    StringBuffer suri = new StringBuffer();
    suri.append("/xava/jasperReport");
    suri.append("?language=");
    suri.append(Locales.getCurrent().getLanguage());
    suri.append("&widths=");
    suri.append(Arrays.toString(getWidths(tableModel)));
    if (columnCountLimit != null) {
      suri.append("&columnCountLimit=");
      suri.append(columnCountLimit);
    }
    response.setCharacterEncoding(XSystem.getEncoding());
    return Servlets.getURIAsStream(request, response, suri.toString());
  }

  private int[] getWidths(TableModel tableModel) {
    int[] widths = new int[tableModel.getColumnCount()];
    for (int r = 0;
        r < Math.min(tableModel.getRowCount(), 500);
        r++) { // the 500 limit is not a performance tweak; it just uses a sample of the data for huge tables
      for (int c = 0; c < tableModel.getColumnCount(); c++) {
        Object o = tableModel.getValueAt(r, c);
        if (o instanceof String) {
          String s = ((String) o).trim();
          if (s.length() > widths[c]) widths[c] = s.length();
        }
      }
    }
    return widths;
  }

  private TableModel getTableModel(
      HttpServletRequest request,
      Tab tab,
      int[] selectedRows,
      boolean labelAsHeader,
      boolean format,
      Integer columnCountLimit)
      throws Exception {
    TableModel data = null;
    if (selectedRows != null && selectedRows.length > 0) {
      data = new SelectedRowsXTableModel(tab.getTableModel(), selectedRows);
    } else {
      data = tab.getAllDataTableModel();
    }
    return new TableModelDecorator(
        request,
        data,
        tab.getMetaProperties(),
        Locales.getCurrent(),
        labelAsHeader,
        format,
        columnCountLimit);
  }

  private static Object formatBigDecimal(Object number, Locale locale) {
    NumberFormat nf = NumberFormat.getNumberInstance(locale);
    nf.setMinimumFractionDigits(2);
    return nf.format(number);
  }

  private int[] getSelectedRows(int[] selectedRowsNumber, Map[] selectedRowsKeys, Tab tab) {
    if (selectedRowsKeys == null || selectedRowsKeys.length == 0) return new int[0];
    // selectedRowsNumber is the most performant so we use it when possible
    else if (selectedRowsNumber.length == selectedRowsKeys.length) return selectedRowsNumber;
    else {
      // find the rows from the selectedKeys

      // This performs poorly, but it covers the case in which the selected
      // rows are not loaded for the tab, something that can occur if the user
      // selects rows and afterwards reorders the list.
      try {
        int[] s = new int[selectedRowsKeys.length];
        List selectedKeys = Arrays.asList(selectedRowsKeys);
        int end = tab.getTableModel().getTotalSize();
        int x = 0;
        for (int i = 0; i < end; i++) {
          Map key = (Map) tab.getTableModel().getObjectAt(i);
          if (selectedKeys.contains(key)) {
            s[x] = i;
            x++;
          }
        }
        return s;
      } catch (Exception ex) {
        log.warn(XavaResources.getString("fails_selected"), ex);
        throw new XavaException("fails_selected");
      }
    }
  }
}
Example #9
0
/**
 * A simple RPC mechanism.
 *
 * <p>A <i>protocol</i> is a Java interface. All parameters and return types must be one of:
 *
 * <ul>
 *   <li>a primitive type, <code>boolean</code>, <code>byte</code>, <code>char</code>, <code>short
 *       </code>, <code>int</code>, <code>long</code>, <code>float</code>, <code>double</code>, or
 *       <code>void</code>; or
 *   <li>a {@link String}; or
 *   <li>a {@link Writable}; or
 *   <li>an array of the above types
 * </ul>
 *
 * All methods in the protocol should throw only IOException. No field data of the protocol instance
 * is transmitted.
 */
public class RPC {
  private static final Log LOG = LogFactory.getLog(RPC.class);

  private RPC() {} // no public ctor
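
  /*
   * A minimal, hypothetical sketch of a protocol interface that satisfies the
   * rules described in the class javadoc above: parameters and return types are
   * limited to primitives, String, Writable, or arrays of those, and every
   * method declares only IOException. The interface and method names are
   * illustrative placeholders, not part of any real protocol.
   */
  interface ExampleLocatorProtocol extends VersionedProtocol {
    long getBlockCount(String path) throws IOException;

    String[] listBlockIds(String path, int limit) throws IOException;
  }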

  /** A method invocation, including the method name and its parameters. */
  private static class Invocation implements Writable, Configurable {
    private String methodName;
    private Class[] parameterClasses;
    private Object[] parameters;
    private Configuration conf;

    public Invocation() {}

    public Invocation(Method method, Object[] parameters) {
      this.methodName = method.getName();
      this.parameterClasses = method.getParameterTypes();
      this.parameters = parameters;
    }

    /** The name of the method invoked. */
    public String getMethodName() {
      return methodName;
    }

    /** The parameter classes. */
    public Class[] getParameterClasses() {
      return parameterClasses;
    }

    /** The parameter instances. */
    public Object[] getParameters() {
      return parameters;
    }

    public void readFields(DataInput in) throws IOException {
      methodName = UTF8.readString(in);
      parameters = new Object[in.readInt()];
      parameterClasses = new Class[parameters.length];
      ObjectWritable objectWritable = new ObjectWritable();
      for (int i = 0; i < parameters.length; i++) {
        parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
        parameterClasses[i] = objectWritable.getDeclaredClass();
      }
    }

    public void write(DataOutput out) throws IOException {
      ObjectWritable.writeStringCached(out, methodName);
      out.writeInt(parameterClasses.length);
      for (int i = 0; i < parameterClasses.length; i++) {
        ObjectWritable.writeObject(out, parameters[i], parameterClasses[i], conf);
      }
    }

    public String toString() {
      StringBuffer buffer = new StringBuffer();
      buffer.append(methodName);
      buffer.append("(");
      for (int i = 0; i < parameters.length; i++) {
        if (i != 0) buffer.append(", ");
        buffer.append(parameters[i]);
      }
      buffer.append(")");
      return buffer.toString();
    }

    public void setConf(Configuration conf) {
      this.conf = conf;
    }

    public Configuration getConf() {
      return this.conf;
    }
  }

  /* Cache a client using its socket factory as the hash key */
  private static class ClientCache {
    private Map<SocketFactory, Client> clients = new HashMap<SocketFactory, Client>();

    /**
     * Construct & cache an IPC client with the user-provided SocketFactory if no cached client
     * exists.
     *
     * @param conf Configuration
     * @return an IPC client
     */
    private synchronized Client getClient(Configuration conf, SocketFactory factory) {
      // Construct & cache client.  The configuration is only used for timeout,
      // and Clients have connection pools.  So we can either (a) lose some
      // connection pooling and leak sockets, or (b) use the same timeout for all
      // configurations.  Since the IPC is usually intended globally, not
      // per-job, we choose (a).
      Client client = clients.get(factory);
      if (client == null) {
        client = new Client(ObjectWritable.class, conf, factory);
        clients.put(factory, client);
      } else {
        client.incCount();
      }
      return client;
    }

    /**
     * Construct & cache an IPC client with the default SocketFactory if no cached client exists.
     *
     * @param conf Configuration
     * @return an IPC client
     */
    private synchronized Client getClient(Configuration conf) {
      return getClient(conf, SocketFactory.getDefault());
    }

    /**
     * Stop an RPC client connection. An RPC client is closed only when its reference count becomes
     * zero.
     */
    private void stopClient(Client client) {
      synchronized (this) {
        client.decCount();
        if (client.isZeroReference()) {
          clients.remove(client.getSocketFactory());
        }
      }
      if (client.isZeroReference()) {
        client.stop();
      }
    }
  }

  private static ClientCache CLIENTS = new ClientCache();

  private static class Invoker implements InvocationHandler {
    private InetSocketAddress address;
    private UserGroupInformation ticket;
    private Client client;
    private boolean isClosed = false;
    private boolean needCheckDnsUpdate = false;
    private long timeLastDnsCheck = 0;
    private final long MIN_DNS_CHECK_INTERVAL_MSEC = 120 * 1000;
    private final int rpcTimeout;
    private final Class<?> protocol;

    public Invoker(
        InetSocketAddress address,
        UserGroupInformation ticket,
        Configuration conf,
        SocketFactory factory,
        int rpcTimeout,
        Class<?> protocol) {
      this.address = address;
      this.ticket = ticket;
      this.client = CLIENTS.getClient(conf, factory);
      this.rpcTimeout = rpcTimeout;
      this.protocol = protocol;
    }

    private synchronized InetSocketAddress getAddress() {
      if (needCheckDnsUpdate
          && address != null
          && address.getHostName() != null
          && System.currentTimeMillis() - this.timeLastDnsCheck > MIN_DNS_CHECK_INTERVAL_MSEC) {
        try {
          InetSocketAddress newAddr = NetUtils.resolveAddress(address);
          if (newAddr != null) {
            LOG.info("DNS change: " + newAddr);
            address = newAddr;
          }
        } finally {
          this.timeLastDnsCheck = System.currentTimeMillis();
        }
      }
      needCheckDnsUpdate = false;
      return address;
    }

    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      final boolean logDebug = LOG.isDebugEnabled();
      long startTime = 0;
      if (logDebug) {
        startTime = System.currentTimeMillis();
      }

      ObjectWritable value = null;
      try {
        value =
            (ObjectWritable)
                client.call(
                    new Invocation(method, args), getAddress(), protocol, ticket, rpcTimeout);
      } catch (RemoteException re) {
        throw re;
      } catch (ConnectException ce) {
        needCheckDnsUpdate = true;
        throw ce;
      } catch (NoRouteToHostException nrhe) {
        needCheckDnsUpdate = true;
        throw nrhe;
      } catch (PortUnreachableException pue) {
        needCheckDnsUpdate = true;
        throw pue;
      } catch (UnknownHostException uhe) {
        needCheckDnsUpdate = true;
        throw uhe;
      }
      if (logDebug) {
        long callTime = System.currentTimeMillis() - startTime;
        LOG.debug("Call: " + method.getName() + " " + callTime);
      }
      return value.get();
    }

    /* close the IPC client that's responsible for this invoker's RPCs */
    private synchronized void close() {
      if (!isClosed) {
        isClosed = true;
        CLIENTS.stopClient(client);
      }
    }
  }

  /**
   * An exception indicating that the client and server have incompatible versions. They are not
   * able to communicate with each other.
   */
  public static class VersionIncompatible extends IOException {
    private String interfaceName;
    private long clientVersion;
    private long serverVersion;

    /**
     * Create a version incompatible exception
     *
     * @param interfaceName the name of the protocol mismatch
     * @param clientVersion the client's version of the protocol
     * @param serverVersion the server's version of the protocol
     */
    public VersionIncompatible(String interfaceName, long clientVersion, long serverVersion) {
      super(
          "Protocol "
              + interfaceName
              + " version mismatch. (client = "
              + clientVersion
              + ", server = "
              + serverVersion
              + ")");
      this.interfaceName = interfaceName;
      this.clientVersion = clientVersion;
      this.serverVersion = serverVersion;
    }

    /**
     * Get the interface name
     *
     * @return the java class name (eg. org.apache.hadoop.mapred.InterTrackerProtocol)
     */
    public String getInterfaceName() {
      return interfaceName;
    }

    /** Get the client's preferred version */
    public long getClientVersion() {
      return clientVersion;
    }

    /** Get the version the server agreed to. */
    public long getServerVersion() {
      return serverVersion;
    }
  }

  /**
   * A version mismatch for the RPC protocol.
   *
   * <p>The client and server have different versions, but the server cannot determine whether they
   * are compatible, typically because the client is newer than the server. The proxy is therefore
   * still created, and the application client is left to decide whether the two sides are
   * compatible.
   */
  public static class VersionMismatch extends VersionIncompatible {
    private static final long serialVersionUID = 1L;
    private final VersionedProtocol proxy;

    /**
     * Create a version mismatch exception
     *
     * @param interfaceName the name of the protocol mismatch
     * @param clientVersion the client's version of the protocol
     * @param serverVersion the server's version of the protocol
     */
    public VersionMismatch(String interfaceName, long clientVersion, long serverVersion) {
      super(interfaceName, clientVersion, serverVersion);
      proxy = null;
    }

    /**
     * Create a version mismatch exception
     *
     * @param interfaceName the name of the protocol mismatch
     * @param clientVersion the client's version of the protocol
     * @param serverVersion the server's version of the protocol
     * @param proxy the proxy
     */
    public VersionMismatch(
        String interfaceName, long clientVersion, long serverVersion, VersionedProtocol proxy) {
      super(interfaceName, clientVersion, serverVersion);
      this.proxy = proxy;
    }

    /** Return the proxy */
    public VersionedProtocol getProxy() {
      return proxy;
    }
  }
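
  /*
   * Hypothetical usage sketch for VersionMismatch: an application that believes
   * the two versions are still compatible may catch the exception and keep the
   * proxy it carries instead of failing outright. "ClientProtocol", "addr" and
   * "conf" are placeholders for whatever protocol and settings the caller uses.
   *
   *   ClientProtocol namenode;
   *   try {
   *     namenode = RPC.waitForProxy(ClientProtocol.class, ClientProtocol.versionID, addr, conf);
   *   } catch (RPC.VersionMismatch vm) {
   *     // decide at the application level whether the versions are compatible
   *     namenode = (ClientProtocol) vm.getProxy();
   *   }
   */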

  public static <T extends VersionedProtocol> T waitForProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
  }

  public static <T extends VersionedProtocol> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, Long.MAX_VALUE);
  }

  public static <T extends VersionedProtocol> T waitForProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long timeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, timeout).getProxy();
  }

  public static <T extends VersionedProtocol> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long timeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, timeout, 0);
  }

  /**
   * Get a proxy connection to a remote server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param connTimeout time in milliseconds before giving up
   * @param rpcTimeout timeout for each RPC
   * @return the proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T extends VersionedProtocol> T waitForProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long connTimeout,
      int rpcTimeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, connTimeout, rpcTimeout)
        .getProxy();
  }

  /**
   * Get a proxy connection to a remote server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param rpcTimeout timeout for each RPC
   * @param timeout time in milliseconds before giving up
   * @return the proxy
   * @throws IOException if the far end throws a RemoteException
   */
  static <T extends VersionedProtocol> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long timeout,
      int rpcTimeout)
      throws IOException {
    long startTime = System.currentTimeMillis();
    UserGroupInformation ugi = null;
    try {
      ugi = UserGroupInformation.login(conf);
    } catch (LoginException le) {
      throw new RuntimeException("Couldn't login!");
    }
    IOException ioe;
    while (true) {
      try {
        return getProtocolProxy(
            protocol,
            clientVersion,
            addr,
            ugi,
            conf,
            NetUtils.getDefaultSocketFactory(conf),
            rpcTimeout);
      } catch (ConnectException se) { // namenode has not been started
        LOG.info("Server at " + addr + " not available yet, Zzzzz...");
        ioe = se;
      } catch (SocketTimeoutException te) { // namenode is busy
        LOG.info("Problem connecting to server: " + addr);
        ioe = te;
      }
      // check if timed out
      if (System.currentTimeMillis() - timeout >= startTime) {
        throw ioe;
      }

      // wait for retry
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // IGNORE
      }
    }
  }

  /**
   * Construct a client-side proxy object that implements the named protocol, talking to a server at
   * the named address.
   */
  public static <T extends VersionedProtocol> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, conf, factory).getProxy();
  }

  /**
   * Construct a client-side protocol proxy that contains a set of server methods and a proxy object
   * implementing the named protocol, talking to a server at the named address.
   */
  public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    UserGroupInformation ugi = null;
    try {
      ugi = UserGroupInformation.login(conf);
    } catch (LoginException le) {
      throw new RuntimeException("Couldn't login!");
    }
    return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
  }

  /**
   * Construct a client-side proxy object that implements the named protocol, talking to a server at
   * the named address.
   */
  public static <T extends VersionedProtocol> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory).getProxy();
  }

  /**
   * Construct a client-side protocol proxy that contains a set of server methods and a proxy object
   * implementing the named protocol, talking to a server at the named address.
   */
  public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, 0);
  }

  /**
   * Construct a client-side proxy that implements the named protocol, talking to a server at the
   * named address.
   *
   * @param protocol protocol
   * @param clientVersion client's version
   * @param addr server address
   * @param ticket security ticket
   * @param conf configuration
   * @param factory socket factory
   * @param rpcTimeout max time for each rpc; 0 means no timeout
   * @return the proxy
   * @throws IOException if any error occurs
   */
  public static <T extends VersionedProtocol> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory,
      int rpcTimeout)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout)
        .getProxy();
  }

  /**
   * Construct a client-side proxy that implements the named protocol, talking to a server at the
   * named address.
   *
   * @param protocol protocol
   * @param clientVersion client's version
   * @param addr server address
   * @param ticket security ticket
   * @param conf configuration
   * @param factory socket factory
   * @param rpcTimeout max time for each rpc; 0 means no timeout
   * @return the proxy
   * @throws IOException if any error occurs
   */
  @SuppressWarnings("unchecked")
  public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory,
      int rpcTimeout)
      throws IOException {
    T proxy =
        (T)
            Proxy.newProxyInstance(
                protocol.getClassLoader(),
                new Class[] {protocol},
                new Invoker(addr, ticket, conf, factory, rpcTimeout, protocol));
    String protocolName = protocol.getName();

    try {
      ProtocolSignature serverInfo =
          proxy.getProtocolSignature(
              protocolName, clientVersion, ProtocolSignature.getFingerprint(protocol.getMethods()));
      return new ProtocolProxy<T>(protocol, proxy, serverInfo.getMethods());
    } catch (RemoteException re) {
      IOException ioe = re.unwrapRemoteException(IOException.class);
      if (ioe.getMessage()
          .startsWith(IOException.class.getName() + ": " + NoSuchMethodException.class.getName())) {
        // Method getProtocolSignature not supported
        long serverVersion = proxy.getProtocolVersion(protocol.getName(), clientVersion);
        if (serverVersion == clientVersion) {
          return new ProtocolProxy<T>(protocol, proxy, null);
        }
        throw new VersionMismatch(protocolName, clientVersion, serverVersion, proxy);
      }
      throw re;
    }
  }

  /**
   * Construct a client-side proxy object with the default SocketFactory
   *
   * @param protocol
   * @param clientVersion
   * @param addr
   * @param conf
   * @return a proxy instance
   * @throws IOException
   */
  public static <T extends VersionedProtocol> T getProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
  }

  /**
   * Construct a client-side proxy object with the default SocketFactory
   *
   * @param protocol
   * @param clientVersion
   * @param addr
   * @param conf
   * @return a proxy instance
   * @throws IOException
   */
  public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {

    return getProtocolProxy(
        protocol, clientVersion, addr, conf, NetUtils.getDefaultSocketFactory(conf));
  }

  /**
   * Stop this proxy and release its invoker's resource
   *
   * @param <T>
   * @param proxy the proxy to be stopped
   */
  public static <T extends VersionedProtocol> void stopProxy(T proxy) {
    if (proxy != null) {
      ((Invoker) Proxy.getInvocationHandler(proxy)).close();
    }
  }
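
  /*
   * Hypothetical end-to-end sketch of the client-side lifecycle built from the
   * factory methods above: obtain a proxy, call it through the protocol
   * interface, then release the shared IPC client via stopProxy. "MyProtocol",
   * "versionID" and "doWork" are placeholders, not names defined here.
   *
   *   MyProtocol stub = RPC.getProxy(MyProtocol.class, MyProtocol.versionID,
   *       new InetSocketAddress("node1", 8020), conf);
   *   try {
   *     stub.doWork();            // shipped to the server as an Invocation
   *   } finally {
   *     RPC.stopProxy(stub);      // decrements the cached Client's reference count
   *   }
   */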

  /**
   * Expert: Make multiple, parallel calls to a set of servers.
   *
   * @deprecated Use {@link #call(Method, Object[][], InetSocketAddress[], UserGroupInformation,
   *     Configuration)} instead
   */
  public static Object[] call(
      Method method, Object[][] params, InetSocketAddress[] addrs, Configuration conf)
      throws IOException {
    return call(method, params, addrs, null, conf);
  }

  /** Expert: Make multiple, parallel calls to a set of servers. */
  public static Object[] call(
      Method method,
      Object[][] params,
      InetSocketAddress[] addrs,
      UserGroupInformation ticket,
      Configuration conf)
      throws IOException {

    Invocation[] invocations = new Invocation[params.length];
    for (int i = 0; i < params.length; i++) invocations[i] = new Invocation(method, params[i]);
    Client client = CLIENTS.getClient(conf);
    try {
      Writable[] wrappedValues =
          client.call(invocations, addrs, method.getDeclaringClass(), ticket);

      if (method.getReturnType() == Void.TYPE) {
        return null;
      }

      Object[] values = (Object[]) Array.newInstance(method.getReturnType(), wrappedValues.length);
      for (int i = 0; i < values.length; i++)
        if (wrappedValues[i] != null) values[i] = ((ObjectWritable) wrappedValues[i]).get();

      return values;
    } finally {
      CLIENTS.stopClient(client);
    }
  }
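
  /*
   * Hypothetical sketch of the "parallel call" form above: invoke one method
   * with per-server argument lists and collect one result per address.
   * "MyProtocol" and "getStatus" are illustrative only.
   *
   *   Method m = MyProtocol.class.getMethod("getStatus");
   *   Object[][] params = new Object[addrs.length][0];   // empty argument list per server
   *   Object[] results = RPC.call(m, params, addrs, ticket, conf);
   */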

  static Client getClient(Configuration conf, SocketFactory socketFactory) {
    return CLIENTS.getClient(conf, socketFactory);
  }

  /** Construct a server for a protocol implementation instance listening on a port and address. */
  public static Server getServer(
      final Object instance, final String bindAddress, final int port, Configuration conf)
      throws IOException {
    return getServer(instance, bindAddress, port, 1, false, conf);
  }

  /** Construct a server for a protocol implementation instance listening on a port and address. */
  public static Server getServer(
      final Object instance,
      final String bindAddress,
      final int port,
      final int numHandlers,
      final boolean verbose,
      Configuration conf)
      throws IOException {
    return getServer(instance, bindAddress, port, numHandlers, verbose, conf, true);
  }

  /** Construct a server for a protocol implementation instance listening on a port and address. */
  public static Server getServer(
      final Object instance,
      final String bindAddress,
      final int port,
      final int numHandlers,
      final boolean verbose,
      Configuration conf,
      boolean supportOldJobConf)
      throws IOException {
    return new Server(instance, conf, bindAddress, port, numHandlers, verbose, supportOldJobConf);
  }
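
  /*
   * Hypothetical server-side counterpart of the proxy sketches above: expose an
   * implementation object through the getServer factories. "MyProtocolImpl" is
   * a placeholder for any object whose public methods match the protocol, and
   * start() is assumed to be inherited from org.apache.hadoop.ipc.Server.
   *
   *   Server server = RPC.getServer(new MyProtocolImpl(), "0.0.0.0", 8020,
   *       5, false, conf);   // 5 handler threads, non-verbose
   *   server.start();
   */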

  /** An RPC Server. */
  public static class Server extends org.apache.hadoop.ipc.Server {
    private Object instance;
    private boolean verbose;
    private boolean authorize = false;

    /**
     * Construct an RPC server.
     *
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port)
        throws IOException {
      this(instance, conf, bindAddress, port, 1, false);
    }

    private static String classNameBase(String className) {
      String[] names = className.split("\\.", -1);
      if (names == null || names.length == 0) {
        return className;
      }
      return names[names.length - 1];
    }

    /**
     * Construct an RPC server.
     *
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     */
    public Server(
        Object instance,
        Configuration conf,
        String bindAddress,
        int port,
        int numHandlers,
        boolean verbose)
        throws IOException {
      this(instance, conf, bindAddress, port, numHandlers, verbose, true);
    }

    /**
     * Construct an RPC server.
     *
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     * @param supportOldJobConf supports server to deserialize old job conf
     */
    public Server(
        Object instance,
        Configuration conf,
        String bindAddress,
        int port,
        int numHandlers,
        boolean verbose,
        boolean supportOldJobConf)
        throws IOException {
      super(
          bindAddress,
          port,
          Invocation.class,
          numHandlers,
          conf,
          classNameBase(instance.getClass().getName()),
          supportOldJobConf);
      this.instance = instance;
      this.verbose = verbose;
      this.authorize =
          conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
    }

    public Writable call(Class<?> protocol, Writable param, long receivedTime) throws IOException {
      try {
        Invocation call = (Invocation) param;
        if (verbose) log("Call: " + call);

        Method method = protocol.getMethod(call.getMethodName(), call.getParameterClasses());
        method.setAccessible(true);

        int qTime = (int) (System.currentTimeMillis() - receivedTime);
        long startNanoTime = System.nanoTime();
        Object value = method.invoke(instance, call.getParameters());
        long processingMicroTime = (System.nanoTime() - startNanoTime) / 1000;
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Served: "
                  + call.getMethodName()
                  + " queueTime (millisec)= "
                  + qTime
                  + " procesingTime (microsec)= "
                  + processingMicroTime);
        }
        rpcMetrics.rpcQueueTime.inc(qTime);
        rpcMetrics.rpcProcessingTime.inc(processingMicroTime);

        MetricsTimeVaryingRate m =
            (MetricsTimeVaryingRate) rpcMetrics.registry.get(call.getMethodName());
        if (m == null) {
          try {
            m = new MetricsTimeVaryingRate(call.getMethodName(), rpcMetrics.registry);
          } catch (IllegalArgumentException iae) {
            // the metric has already been registered; re-fetch the handle
            LOG.debug("Error registering " + call.getMethodName(), iae);
            m = (MetricsTimeVaryingRate) rpcMetrics.registry.get(call.getMethodName());
          }
        }
        // record call time in microseconds
        m.inc(processingMicroTime);

        if (verbose) log("Return: " + value);

        return new ObjectWritable(method.getReturnType(), value);

      } catch (InvocationTargetException e) {
        Throwable target = e.getTargetException();
        if (target instanceof IOException) {
          throw (IOException) target;
        } else {
          IOException ioe = new IOException(target.toString());
          ioe.setStackTrace(target.getStackTrace());
          throw ioe;
        }
      } catch (Throwable e) {
        if (!(e instanceof IOException)) {
          LOG.error("Unexpected throwable object ", e);
        }
        IOException ioe = new IOException(e.toString());
        ioe.setStackTrace(e.getStackTrace());
        throw ioe;
      }
    }

    @Override
    public void authorize(Subject user, ConnectionHeader connection) throws AuthorizationException {
      if (authorize) {
        Class<?> protocol = null;
        try {
          protocol = getProtocolClass(connection.getProtocol(), getConf());
        } catch (ClassNotFoundException cfne) {
          throw new AuthorizationException("Unknown protocol: " + connection.getProtocol());
        }
        ServiceAuthorizationManager.authorize(user, protocol);
      }
    }
  }

  private static void log(String value) {
    if (value != null && value.length() > 55) value = value.substring(0, 55) + "...";
    LOG.info(value);
  }
}
Example #10
0
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
public class WritableRpcEngine implements RpcEngine {
  private static final Log LOG = LogFactory.getLog(RPC.class);

  // writableRpcVersion should be updated if there is a change
  // in format of the rpc messages.

  // 2L - added declared class to Invocation
  public static final long writableRpcVersion = 2L;

  /** Whether or not this class has been initialized. */
  private static boolean isInitialized = false;

  static {
    ensureInitialized();
  }

  /** Initialize this class if it isn't already. */
  public static synchronized void ensureInitialized() {
    if (!isInitialized) {
      initialize();
    }
  }

  /** Register the rpcRequest deserializer for WritableRpcEngine */
  private static synchronized void initialize() {
    org.apache.hadoop.ipc.Server.registerProtocolEngine(
        RPC.RpcKind.RPC_WRITABLE, Invocation.class, new Server.WritableRpcInvoker());
    isInitialized = true;
  }

  /** A method invocation, including the method name and its parameters. */
  private static class Invocation implements Writable, Configurable {
    private String methodName;
    private Class<?>[] parameterClasses;
    private Object[] parameters;
    private Configuration conf;
    private long clientVersion;
    private int clientMethodsHash;
    private String declaringClassProtocolName;

    // This could be different from static writableRpcVersion when received
    // at server, if client is using a different version.
    private long rpcVersion;

    @SuppressWarnings("unused") // called when deserializing an invocation
    public Invocation() {}

    public Invocation(Method method, Object[] parameters) {
      this.methodName = method.getName();
      this.parameterClasses = method.getParameterTypes();
      this.parameters = parameters;
      rpcVersion = writableRpcVersion;
      if (method.getDeclaringClass().equals(VersionedProtocol.class)) {
        // VersionedProtocol is exempted from version check.
        clientVersion = 0;
        clientMethodsHash = 0;
      } else {
        this.clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
        this.clientMethodsHash =
            ProtocolSignature.getFingerprint(method.getDeclaringClass().getMethods());
      }
      this.declaringClassProtocolName = RPC.getProtocolName(method.getDeclaringClass());
    }

    /** The name of the method invoked. */
    public String getMethodName() {
      return methodName;
    }

    /** The parameter classes. */
    public Class<?>[] getParameterClasses() {
      return parameterClasses;
    }

    /** The parameter instances. */
    public Object[] getParameters() {
      return parameters;
    }

    private long getProtocolVersion() {
      return clientVersion;
    }

    @SuppressWarnings("unused")
    private int getClientMethodsHash() {
      return clientMethodsHash;
    }

    /**
     * Returns the rpc version used by the client.
     *
     * @return rpcVersion
     */
    public long getRpcVersion() {
      return rpcVersion;
    }

    @SuppressWarnings("deprecation")
    public void readFields(DataInput in) throws IOException {
      rpcVersion = in.readLong();
      declaringClassProtocolName = UTF8.readString(in);
      methodName = UTF8.readString(in);
      clientVersion = in.readLong();
      clientMethodsHash = in.readInt();
      parameters = new Object[in.readInt()];
      parameterClasses = new Class[parameters.length];
      ObjectWritable objectWritable = new ObjectWritable();
      for (int i = 0; i < parameters.length; i++) {
        parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
        parameterClasses[i] = objectWritable.getDeclaredClass();
      }
    }

    @SuppressWarnings("deprecation")
    public void write(DataOutput out) throws IOException {
      out.writeLong(rpcVersion);
      UTF8.writeString(out, declaringClassProtocolName);
      UTF8.writeString(out, methodName);
      out.writeLong(clientVersion);
      out.writeInt(clientMethodsHash);
      out.writeInt(parameterClasses.length);
      for (int i = 0; i < parameterClasses.length; i++) {
        ObjectWritable.writeObject(out, parameters[i], parameterClasses[i], conf, true);
      }
    }

    public String toString() {
      StringBuilder buffer = new StringBuilder();
      buffer.append(methodName);
      buffer.append("(");
      for (int i = 0; i < parameters.length; i++) {
        if (i != 0) buffer.append(", ");
        buffer.append(parameters[i]);
      }
      buffer.append(")");
      buffer.append(", rpc version=" + rpcVersion);
      buffer.append(", client version=" + clientVersion);
      buffer.append(", methodsFingerPrint=" + clientMethodsHash);
      return buffer.toString();
    }

    public void setConf(Configuration conf) {
      this.conf = conf;
    }

    public Configuration getConf() {
      return this.conf;
    }
  }

  private static ClientCache CLIENTS = new ClientCache();

  private static class Invoker implements RpcInvocationHandler {
    private Client.ConnectionId remoteId;
    private Client client;
    private boolean isClosed = false;

    public Invoker(
        Class<?> protocol,
        InetSocketAddress address,
        UserGroupInformation ticket,
        Configuration conf,
        SocketFactory factory,
        int rpcTimeout)
        throws IOException {
      this.remoteId =
          Client.ConnectionId.getConnectionId(address, protocol, ticket, rpcTimeout, conf);
      this.client = CLIENTS.getClient(conf, factory);
    }

    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      long startTime = 0;
      if (LOG.isDebugEnabled()) {
        startTime = Time.now();
      }

      ObjectWritable value =
          (ObjectWritable)
              client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
      if (LOG.isDebugEnabled()) {
        long callTime = Time.now() - startTime;
        LOG.debug("Call: " + method.getName() + " " + callTime);
      }
      return value.get();
    }

    /* close the IPC client that's responsible for this invoker's RPCs */
    public synchronized void close() {
      if (!isClosed) {
        isClosed = true;
        CLIENTS.stopClient(client);
      }
    }

    @Override
    public ConnectionId getConnectionId() {
      return remoteId;
    }
  }

  // for unit testing only
  @InterfaceAudience.Private
  @InterfaceStability.Unstable
  static Client getClient(Configuration conf) {
    return CLIENTS.getClient(conf);
  }

  /**
   * Construct a client-side proxy object that implements the named protocol, talking to a server at
   * the named address.
   *
   * @param <T>
   */
  @Override
  @SuppressWarnings("unchecked")
  public <T> ProtocolProxy<T> getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory,
      int rpcTimeout,
      RetryPolicy connectionRetryPolicy)
      throws IOException {

    if (connectionRetryPolicy != null) {
      throw new UnsupportedOperationException(
          "Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
    }

    T proxy =
        (T)
            Proxy.newProxyInstance(
                protocol.getClassLoader(),
                new Class[] {protocol},
                new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout));
    return new ProtocolProxy<T>(protocol, proxy, true);
  }

  /* Construct a server for a protocol implementation instance listening on a
   * port and address. */
  @Override
  public RPC.Server getServer(
      Class<?> protocolClass,
      Object protocolImpl,
      String bindAddress,
      int port,
      int numHandlers,
      int numReaders,
      int queueSizePerHandler,
      boolean verbose,
      Configuration conf,
      SecretManager<? extends TokenIdentifier> secretManager,
      String portRangeConfig)
      throws IOException {
    return new Server(
        protocolClass,
        protocolImpl,
        conf,
        bindAddress,
        port,
        numHandlers,
        numReaders,
        queueSizePerHandler,
        verbose,
        secretManager,
        portRangeConfig);
  }

  /** An RPC Server. */
  public static class Server extends RPC.Server {
    /**
     * Construct an RPC server.
     *
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @deprecated Use #Server(Class, Object, Configuration, String, int)
     */
    @Deprecated
    public Server(Object instance, Configuration conf, String bindAddress, int port)
        throws IOException {
      this(null, instance, conf, bindAddress, port);
    }

    /**
     * Construct an RPC server.
     *
     * @param protocolClass class
     * @param protocolImpl the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     */
    public Server(
        Class<?> protocolClass,
        Object protocolImpl,
        Configuration conf,
        String bindAddress,
        int port)
        throws IOException {
      this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1, false, null, null);
    }

    /**
     * Construct an RPC server.
     *
     * @param protocolImpl the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     * @deprecated use Server#Server(Class, Object, Configuration, String, int, int, int, int,
     *     boolean, SecretManager)
     */
    @Deprecated
    public Server(
        Object protocolImpl,
        Configuration conf,
        String bindAddress,
        int port,
        int numHandlers,
        int numReaders,
        int queueSizePerHandler,
        boolean verbose,
        SecretManager<? extends TokenIdentifier> secretManager)
        throws IOException {
      this(
          null,
          protocolImpl,
          conf,
          bindAddress,
          port,
          numHandlers,
          numReaders,
          queueSizePerHandler,
          verbose,
          secretManager,
          null);
    }

    /**
     * Construct an RPC server.
     *
     * @param protocolClass - the protocol being registered can be null for compatibility with old
     *     usage (see below for details)
     * @param protocolImpl the protocol impl that will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     */
    public Server(
        Class<?> protocolClass,
        Object protocolImpl,
        Configuration conf,
        String bindAddress,
        int port,
        int numHandlers,
        int numReaders,
        int queueSizePerHandler,
        boolean verbose,
        SecretManager<? extends TokenIdentifier> secretManager,
        String portRangeConfig)
        throws IOException {
      super(
          bindAddress,
          port,
          null,
          numHandlers,
          numReaders,
          queueSizePerHandler,
          conf,
          classNameBase(protocolImpl.getClass().getName()),
          secretManager,
          portRangeConfig);

      this.verbose = verbose;

      Class<?>[] protocols;
      if (protocolClass == null) { // derive protocol from impl
        /*
         * In order to remain compatible with the old usage where a single
         * target protocolImpl is supplied for all protocol interfaces, and
         * the protocolImpl is derived from the protocolClass(es),
         * we register all interfaces extended by the protocolImpl.
         */
        protocols = RPC.getProtocolInterfaces(protocolImpl.getClass());

      } else {
        if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
          throw new IOException(
              "protocolClass "
                  + protocolClass
                  + " is not implemented by protocolImpl which is of class "
                  + protocolImpl.getClass());
        }
        // register protocol class and its super interfaces
        registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
        protocols = RPC.getProtocolInterfaces(protocolClass);
      }
      for (Class<?> p : protocols) {
        if (!p.equals(VersionedProtocol.class)) {
          registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, p, protocolImpl);
        }
      }
    }

    private static void log(String value) {
      if (value != null && value.length() > 55) value = value.substring(0, 55) + "...";
      LOG.info(value);
    }

    static class WritableRpcInvoker implements RpcInvoker {

      @Override
      public Writable call(
          org.apache.hadoop.ipc.RPC.Server server,
          String protocolName,
          Writable rpcRequest,
          long receivedTime)
          throws IOException {
        try {
          Invocation call = (Invocation) rpcRequest;
          if (server.verbose) log("Call: " + call);

          // Verify rpc version
          if (call.getRpcVersion() != writableRpcVersion) {
            // Client is using a different version of WritableRpc
            throw new IOException(
                "WritableRpc version mismatch, client side version="
                    + call.getRpcVersion()
                    + ", server side version="
                    + writableRpcVersion);
          }

          long clientVersion = call.getProtocolVersion();
          final String protoName;
          ProtoClassProtoImpl protocolImpl;
          if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
            // VersionedProtocol methods are often used by the client to figure
            // out which version of the protocol to use.
            //
            // Versioned protocol methods should go to the protocolName protocol
            // rather than the declaring class of the method, since the declaring
            // class is VersionedProtocol, which is not registered directly.
            // Send the call to the highest protocol version.
            VerProtocolImpl highest =
                server.getHighestSupportedProtocol(RPC.RpcKind.RPC_WRITABLE, protocolName);
            if (highest == null) {
              throw new IOException("Unknown protocol: " + protocolName);
            }
            protocolImpl = highest.protocolTarget;
          } else {
            protoName = call.declaringClassProtocolName;

            // Find the right impl for the protocol based on client version.
            ProtoNameVer pv = new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
            protocolImpl = server.getProtocolImplMap(RPC.RpcKind.RPC_WRITABLE).get(pv);
            if (protocolImpl == null) { // no match for Protocol AND Version
              VerProtocolImpl highest =
                  server.getHighestSupportedProtocol(RPC.RpcKind.RPC_WRITABLE, protoName);
              if (highest == null) {
                throw new IOException("Unknown protocol: " + protoName);
              } else { // protocol supported but not the version that client wants
                throw new RPC.VersionMismatch(protoName, clientVersion, highest.version);
              }
            }
          }

          // Invoke the protocol method

          long startTime = Time.now();
          Method method =
              protocolImpl.protocolClass.getMethod(
                  call.getMethodName(), call.getParameterClasses());
          method.setAccessible(true);
          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
          Object value = method.invoke(protocolImpl.protocolImpl, call.getParameters());
          int processingTime = (int) (Time.now() - startTime);
          int qTime = (int) (startTime - receivedTime);
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                "Served: "
                    + call.getMethodName()
                    + " queueTime= "
                    + qTime
                    + " procesingTime= "
                    + processingTime);
          }
          server.rpcMetrics.addRpcQueueTime(qTime);
          server.rpcMetrics.addRpcProcessingTime(processingTime);
          server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(), processingTime);
          if (server.verbose) log("Return: " + value);

          return new ObjectWritable(method.getReturnType(), value);

        } catch (InvocationTargetException e) {
          Throwable target = e.getTargetException();
          if (target instanceof IOException) {
            throw (IOException) target;
          } else {
            IOException ioe = new IOException(target.toString());
            ioe.setStackTrace(target.getStackTrace());
            throw ioe;
          }
        } catch (Throwable e) {
          if (!(e instanceof IOException)) {
            LOG.error("Unexpected throwable object ", e);
          }
          IOException ioe = new IOException(e.toString());
          ioe.setStackTrace(e.getStackTrace());
          throw ioe;
        }
      }
    }
  }

  @Override
  public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
      ConnectionId connId, Configuration conf, SocketFactory factory) throws IOException {
    throw new UnsupportedOperationException("This proxy is not supported");
  }
}
/** @author stone */
public class LSATApplication extends javax.swing.JFrame {
  // private user variables declaration
  private Project theProject;
  private JDesktopPane theDesktop;
  private DocumentViewerFrame documentViewerTable;
  private LSAResults currentResults;
  private DebugFrame debugFrame;
  private Log log = LogFactory.getLog(LSATApplication.class);
  private LSATPreferences thePrefs = LSATPreferencesFactory.getPrefs(false);
  private DocumentFrameTableModel docFrameTableModel;
  private File lastPath;
  // end private user variables declaration

  /** Creates new form LSATApplication */
  public LSATApplication() {
    initComponents();
    initUserComponents();
    setWindowSizeAndTitle();
  }

  /**
   * This method is called from within the constructor to initialize the form. WARNING: Do NOT
   * modify this code. The content of this method is always regenerated by the Form Editor.
   */
  // <editor-fold defaultstate="collapsed" desc=" Generated Code ">//GEN-BEGIN:initComponents
  private void initComponents() {
    jMenuBar = new javax.swing.JMenuBar();
    jMenuFile = new javax.swing.JMenu();
    jMenuItemNewProject = new javax.swing.JMenuItem();
    jMenuItemLoadProject = new javax.swing.JMenuItem();
    jMenuItemSaveProject = new javax.swing.JMenuItem();
    jMenuItemCloseProject = new javax.swing.JMenuItem();
    jSeparator = new javax.swing.JSeparator();
    jMenuItemImport = new javax.swing.JMenuItem();
    jSeparator1 = new javax.swing.JSeparator();
    jMenuItemQuit = new javax.swing.JMenuItem();
    jMenuEdit = new javax.swing.JMenu();
    jMenuItemCut = new javax.swing.JMenuItem();
    jMenuItemCopy = new javax.swing.JMenuItem();
    jMenuItemPaste = new javax.swing.JMenuItem();
    jMenuView = new javax.swing.JMenu();
    jMenuItemViewDocuments = new javax.swing.JMenuItem();
    jMenuItemViewDebug = new javax.swing.JMenuItem();
    jMenuLSA = new javax.swing.JMenu();
    jMenuItemPerformLSA = new javax.swing.JMenuItem();
    jSeparator2 = new javax.swing.JSeparator();
    jMenuItemViewLSAResults = new javax.swing.JMenuItem();
    jMenuItemViewVisualResults = new javax.swing.JMenuItem();
    jSeparator3 = new javax.swing.JSeparator();
    jMenuItemLoadLSAResults = new javax.swing.JMenuItem();
    jMenuItemSaveLSAResults = new javax.swing.JMenuItem();
    jMenuItemDumpChunks = new javax.swing.JMenuItem();
    jMenuItemLogGraphingStats = new javax.swing.JMenuItem();
    jMenuItemLogRSGraphingStats = new javax.swing.JMenuItem();
    jMenuPreferences = new javax.swing.JMenu();
    jMenuItemShowPreferences = new javax.swing.JMenuItem();

    getContentPane().setLayout(null);

    setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
    jMenuFile.setText("File");
    jMenuItemNewProject.setText("New Project");
    jMenuItemNewProject.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemNewProjectActionPerformed(evt);
          }
        });

    jMenuFile.add(jMenuItemNewProject);

    jMenuItemLoadProject.setText("Load Project");
    jMenuItemLoadProject.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemLoadProjectActionPerformed(evt);
          }
        });

    jMenuFile.add(jMenuItemLoadProject);

    jMenuItemSaveProject.setText("Save Project");
    jMenuItemSaveProject.setEnabled(false);
    jMenuItemSaveProject.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemSaveProjectActionPerformed(evt);
          }
        });

    jMenuFile.add(jMenuItemSaveProject);

    jMenuItemCloseProject.setText("Close Project");
    jMenuItemCloseProject.setEnabled(false);
    jMenuFile.add(jMenuItemCloseProject);

    jMenuFile.add(jSeparator);

    jMenuItemImport.setText("Import");
    jMenuItemImport.setEnabled(false);
    jMenuItemImport.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemImportActionPerformed(evt);
          }
        });

    jMenuFile.add(jMenuItemImport);

    jMenuFile.add(jSeparator1);

    jMenuItemQuit.setText("Quit");
    jMenuItemQuit.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemQuitActionPerformed(evt);
          }
        });

    jMenuFile.add(jMenuItemQuit);

    jMenuBar.add(jMenuFile);

    jMenuEdit.setText("Edit");
    jMenuItemCut.setText("Cut");
    jMenuItemCut.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemCutActionPerformed(evt);
          }
        });

    jMenuEdit.add(jMenuItemCut);

    jMenuItemCopy.setText("Copy");
    jMenuEdit.add(jMenuItemCopy);

    jMenuItemPaste.setText("Paste");
    jMenuEdit.add(jMenuItemPaste);

    jMenuBar.add(jMenuEdit);

    jMenuView.setText("View");
    jMenuView.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuViewActionPerformed(evt);
          }
        });

    jMenuItemViewDocuments.setText("Documents");
    jMenuItemViewDocuments.setEnabled(false);
    jMenuItemViewDocuments.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemViewDocumentsActionPerformed(evt);
          }
        });

    jMenuView.add(jMenuItemViewDocuments);

    jMenuItemViewDebug.setText("View Debug");
    jMenuItemViewDebug.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemViewDebugActionPerformed(evt);
          }
        });

    jMenuView.add(jMenuItemViewDebug);

    jMenuBar.add(jMenuView);

    jMenuLSA.setText("LSA");
    jMenuItemPerformLSA.setText("Perform LSA");
    jMenuItemPerformLSA.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemPerformLSAActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemPerformLSA);

    jMenuLSA.add(jSeparator2);

    jMenuItemViewLSAResults.setText("View LSA Results");
    jMenuItemViewLSAResults.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemViewLSAResultsActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemViewLSAResults);

    jMenuItemViewVisualResults.setText("View results graph");
    jMenuItemViewVisualResults.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemViewVisualResultsActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemViewVisualResults);

    jMenuLSA.add(jSeparator3);

    jMenuItemLoadLSAResults.setText("Load LSA Results");
    jMenuItemLoadLSAResults.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemLoadLSAResultsActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemLoadLSAResults);

    jMenuItemSaveLSAResults.setText("Save LSA Results");
    jMenuItemSaveLSAResults.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemSaveLSAResultsActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemSaveLSAResults);

    jMenuItemDumpChunks.setText("Dump Chunks Belonging to Class");
    jMenuItemDumpChunks.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemDumpChunksActionPerformed(evt);
          }
        });

    jMenuLSA.add(jMenuItemDumpChunks);

    jMenuItemLogGraphingStats.setText("Log Graphing Stats");
    jMenuItemLogGraphingStats.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemLogGraphingStatsActionPerformed(evt);
          }
        });
    jMenuItemLogGraphingStats.addMouseListener(
        new java.awt.event.MouseAdapter() {
          public void mouseClicked(java.awt.event.MouseEvent evt) {
            jMenuItemLogGraphingStatsMouseClicked(evt);
          }
        });

    jMenuLSA.add(jMenuItemLogGraphingStats);

    jMenuItemLogRSGraphingStats.setText("Log RS Graphing Stats");
    jMenuItemLogRSGraphingStats.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemLogRSGraphingStatsActionPerformed(evt);
          }
        });
    jMenuItemLogRSGraphingStats.addMouseListener(
        new java.awt.event.MouseAdapter() {
          public void mouseClicked(java.awt.event.MouseEvent evt) {
            jMenuItemLogRSGraphingStatsMouseClicked(evt);
          }
        });

    jMenuLSA.add(jMenuItemLogRSGraphingStats);

    jMenuBar.add(jMenuLSA);

    jMenuPreferences.setText("Preferences");
    jMenuItemShowPreferences.setText("Show preferences dialog");
    jMenuItemShowPreferences.addActionListener(
        new java.awt.event.ActionListener() {
          public void actionPerformed(java.awt.event.ActionEvent evt) {
            jMenuItemShowPreferencesActionPerformed(evt);
          }
        });

    jMenuPreferences.add(jMenuItemShowPreferences);

    jMenuBar.add(jMenuPreferences);

    setJMenuBar(jMenuBar);

    pack();
  } // </editor-fold>//GEN-END:initComponents

  private void jMenuItemLogRSGraphingStatsMouseClicked(
      java.awt.event.MouseEvent evt) // GEN-FIRST:event_jMenuItemLogRSGraphingStatsMouseClicked
      { // GEN-HEADEREND:event_jMenuItemLogRSGraphingStatsMouseClicked
    // TODO add your handling code here:
  } // GEN-LAST:event_jMenuItemLogRSGraphingStatsMouseClicked

  private void jMenuItemLogRSGraphingStatsActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemLogRSGraphingStatsActionPerformed
      { // GEN-HEADEREND:event_jMenuItemLogRSGraphingStatsActionPerformed
    if (currentResults != null) {
      this.currentResults.printGraphingStatsFromReqSimilieData();
    }
  } // GEN-LAST:event_jMenuItemLogRSGraphingStatsActionPerformed

  private void jMenuItemViewVisualResultsActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemViewVisualResultsActionPerformed
      { // GEN-HEADEREND:event_jMenuItemViewVisualResultsActionPerformed
    if (this.currentResults != null) {
      VisualAnalysisFrame f = new VisualAnalysisFrame(this.currentResults, this.theProject);
      theDesktop.add(f);
    }
  } // GEN-LAST:event_jMenuItemViewVisualResultsActionPerformed

  private void jMenuItemLoadProjectActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemLoadProjectActionPerformed
      { // GEN-HEADEREND:event_jMenuItemLoadProjectActionPerformed
    JFileChooser jfc = new JFileChooser();
    if (lastPath != null) {
      jfc.setCurrentDirectory(lastPath);
    }
    int fileDialogReturnVal = jfc.showOpenDialog(this);

    if (fileDialogReturnVal == JFileChooser.APPROVE_OPTION) {
      try {
        File inputFile = jfc.getSelectedFile();
        FileInputStream fis = new FileInputStream(inputFile);
        ObjectInputStream ois = new ObjectInputStream(fis);

        this.theProject = (Project) ois.readObject();
        this.currentResults = (LSAResults) ois.readObject();

        lastPath = new File(jfc.getSelectedFile().getPath());
      } catch (IOException e) {
        if (this.theProject == null) {
          log.log(Log.ERROR, "Failed to load project");
        }
        if (this.currentResults == null) {
          log.log(Log.WARNING, "Failed to load results");
        }

        log.log(Log.WARNING, e.getMessage());
      } catch (ClassNotFoundException e) {
        log.log(Log.ERROR, "Class not found error, version mismatch");
      }
    }
    if (this.theProject != null) {
      jMenuItemViewDocuments.setEnabled(true);
      jMenuItemSaveProject.setEnabled(true);
      this.setTitle(theProject.getProjectName());
      log.log(Log.INFO, "Project Loaded");
    }
    if (this.currentResults != null) {
      log.log(Log.INFO, "Results loaded");
    }
  } // GEN-LAST:event_jMenuItemLoadProjectActionPerformed

  private void jMenuItemSaveProjectActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemSaveProjectActionPerformed
      { // GEN-HEADEREND:event_jMenuItemSaveProjectActionPerformed
    if (this.theProject != null) {
      JFileChooser jfc = new JFileChooser();
      int fileDialogReturnVal = jfc.showSaveDialog(this);

      if (fileDialogReturnVal == JFileChooser.APPROVE_OPTION) {
        try {
          File outputFile = jfc.getSelectedFile();
          FileOutputStream fos = new FileOutputStream(outputFile);
          ObjectOutputStream oos = new ObjectOutputStream(fos);

          oos.writeObject(this.theProject);
          if (this.currentResults != null) {
            oos.writeObject(this.currentResults);
          }
        } catch (IOException e) {
          log.log(Log.ERROR, "Failed to save file\n" + e.getMessage());
        }
      }
    }
  } // GEN-LAST:event_jMenuItemSaveProjectActionPerformed

  private void jMenuItemCutActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemCutActionPerformed
      { // GEN-HEADEREND:event_jMenuItemCutActionPerformed
    Toolkit t = java.awt.Toolkit.getDefaultToolkit();
    Clipboard c = t.getSystemClipboard();

    JInternalFrame currentFrame = theDesktop.getSelectedFrame();
    /*StringSelection contents = new StringSelection(srcData);
    clipboard.setContents(contents, this);*/

  } // GEN-LAST:event_jMenuItemCutActionPerformed

  private void jMenuItemShowPreferencesActionPerformed(
      java.awt.event.ActionEvent evt) // GEN-FIRST:event_jMenuItemShowPreferencesActionPerformed
      { // GEN-HEADEREND:event_jMenuItemShowPreferencesActionPerformed
    PreferencesDialog.showPreferencesDialog(this, this.thePrefs);
  } // GEN-LAST:event_jMenuItemShowPreferencesActionPerformed

  private void jMenuItemLogGraphingStatsActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemLogGraphingStatsActionPerformed
    if (currentResults != null) {
      // this.currentResults.printGraphingStats();
    }
  } // GEN-LAST:event_jMenuItemLogGraphingStatsActionPerformed

  private void jMenuItemLogGraphingStatsMouseClicked(
      java.awt.event.MouseEvent evt) { // GEN-FIRST:event_jMenuItemLogGraphingStatsMouseClicked
  } // GEN-LAST:event_jMenuItemLogGraphingStatsMouseClicked

  private void jMenuItemDumpChunksActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemDumpChunksActionPerformed
    if (currentResults != null) {
      DocumentClassDialog d = DocumentClassDialog.showClassDialog(this, this.theProject);
      DocumentClass selectedClass = d.getSelectedDocumentClass();

      currentResults.dumpChunksBelongingToDocumentsWithClass(selectedClass.getId());
    }
  } // GEN-LAST:event_jMenuItemDumpChunksActionPerformed

  private void jMenuItemViewDebugActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemViewDebugActionPerformed
    this.debugFrame.setVisible(true);
  } // GEN-LAST:event_jMenuItemViewDebugActionPerformed

  private void jMenuItemViewLSAResultsActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemViewLSAResultsActionPerformed
    if (this.currentResults != null) {
      LSAResultsFrame f = new LSAResultsFrame(this.currentResults, this.theProject);
      theDesktop.add(f);
    }
  } // GEN-LAST:event_jMenuItemViewLSAResultsActionPerformed

  private void jMenuItemLoadLSAResultsActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemLoadLSAResultsActionPerformed
    JFileChooser jfc = new JFileChooser();
    int fileDialogReturnVal = jfc.showOpenDialog(this);

    if (fileDialogReturnVal == JFileChooser.APPROVE_OPTION) {
      try {
        File inputFile = jfc.getSelectedFile();
        FileInputStream fis = new FileInputStream(inputFile);
        ObjectInputStream ois = new ObjectInputStream(fis);

        this.currentResults = (LSAResults) ois.readObject();
      } catch (IOException e) {
        log.log(Log.ERROR, "Failed to load LSA results\n" + e.getMessage());
      } catch (ClassNotFoundException e) {
        log.log(Log.ERROR, "Class not found : Error loading LSA results due to version mismatch");
      }

      log.log(Log.INFO, "LSA results loaded: " + (currentResults != null));
    }
  } // GEN-LAST:event_jMenuItemLoadLSAResultsActionPerformed

  private void jMenuItemSaveLSAResultsActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemSaveLSAResultsActionPerformed
    if (this.currentResults != null) {
      JFileChooser jfc = new JFileChooser();
      int fileDialogReturnVal = jfc.showSaveDialog(this);

      if (fileDialogReturnVal == JFileChooser.APPROVE_OPTION) {
        try {
          File outputFile = jfc.getSelectedFile();
          FileOutputStream fos = new FileOutputStream(outputFile);
          ObjectOutputStream oos = new ObjectOutputStream(fos);

          oos.writeObject(this.currentResults);
        } catch (IOException e) {
          System.out.println("IOexception");
          System.out.println(e.getMessage());
        }
      }
    }
  } // GEN-LAST:event_jMenuItemSaveLSAResultsActionPerformed

  private void jMenuItemPerformLSAActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemPerformLSAActionPerformed
    Thread t =
        new LSAThread(
            theProject.getDocumentCollection(),
            theProject.getDocumentClassCollection(),
            thePrefs.get("lsa-regex"),
            this);
    t.start();
  } // GEN-LAST:event_jMenuItemPerformLSAActionPerformed

  private void jMenuItemImportActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemImportActionPerformed
    JFileChooser jfc = new JFileChooser();
    jfc.setMultiSelectionEnabled(true);
    if (lastPath != null) {
      jfc.setCurrentDirectory(lastPath);
    }
    int fileDialogReturnVal = jfc.showOpenDialog(this);

    // now select the file
    if (fileDialogReturnVal == JFileChooser.APPROVE_OPTION) {
      // add code here to allow selection of a document class

      DocumentClassDialog d = DocumentClassDialog.showClassDialog(this, this.theProject);

      DocumentClass selectedClass = d.getSelectedDocumentClass();

      File[] selected = jfc.getSelectedFiles();
      for (int i = 0; i < selected.length; i++) {
        theProject.addNewDocument(selected[i], 1.0f, selected[i].toString(), selectedClass);
      }

      docFrameTableModel.fireTableDataChanged();

      lastPath = new File(jfc.getSelectedFile().getPath());
    }
  } // GEN-LAST:event_jMenuItemImportActionPerformed

  private void jMenuItemViewDocumentsActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemViewDocumentsActionPerformed
    if (this.docFrameTableModel == null) {
      this.docFrameTableModel = new DocumentFrameTableModel(theProject.getDocumentCollection());
    }
    DocumentFrame d =
        new DocumentFrame(this.docFrameTableModel, theDesktop, theProject.getDocumentCollection());
    d.setVisible(true);
    theDesktop.add(d);
  } // GEN-LAST:event_jMenuItemViewDocumentsActionPerformed

  private void jMenuViewActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuViewActionPerformed
  } // GEN-LAST:event_jMenuViewActionPerformed

  private void jMenuItemQuitActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemQuitActionPerformed
    this.shutDown();
    System.exit(0);
  } // GEN-LAST:event_jMenuItemQuitActionPerformed

  private void jMenuItemNewProjectActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jMenuItemNewProjectActionPerformed
    String projectName = JOptionPane.showInputDialog(null, "Please enter a project name");

    if (projectName != null) {
      theProject = new Project(projectName);

      // now we've got a project we need to enable the save and close buttons
      jMenuItemSaveProject.setEnabled(true);
      jMenuItemCloseProject.setEnabled(true);
      jMenuItemViewDocuments.setEnabled(true);
      jMenuItemImport.setEnabled(true);

      // set title
      setTitle("LSAT - " + projectName);

      jMenuItemViewDocumentsActionPerformed(null);
    }
  } // GEN-LAST:event_jMenuItemNewProjectActionPerformed

  private void setWindowSizeAndTitle() {
    Dimension screenSize = this.getToolkit().getScreenSize();
    int newHeight = (int) ((float) screenSize.height * 0.7);
    int newWidth = (int) ((float) screenSize.width * 0.7);
    this.setSize(newWidth, newHeight);
    newHeight = (int) ((0.3 * (float) screenSize.height) / 2);
    newWidth = (int) ((0.3 * (float) screenSize.width) / 2);
    this.setLocation(newWidth, newHeight);
    this.setTitle("LSAT");
  }

  private void initUserComponents() {
    // required to attach windows to in the MDI world
    theDesktop = new JDesktopPane();
    setContentPane(theDesktop);

    // create and add the debug frame
    debugFrame = new DebugFrame();
    debugFrame.setVisible(false);
    theDesktop.add(debugFrame);
  }

  private void shutDown() {
    // nothing in here yet
  }

  public void showLSAResults(LSAResults results) {
    this.currentResults = results;
    jMenuItemViewLSAResultsActionPerformed(null);
  }

  // Variables declaration - do not modify//GEN-BEGIN:variables
  private javax.swing.JMenuBar jMenuBar;
  private javax.swing.JMenu jMenuEdit;
  private javax.swing.JMenu jMenuFile;
  private javax.swing.JMenuItem jMenuItemCloseProject;
  private javax.swing.JMenuItem jMenuItemCopy;
  private javax.swing.JMenuItem jMenuItemCut;
  private javax.swing.JMenuItem jMenuItemDumpChunks;
  private javax.swing.JMenuItem jMenuItemImport;
  private javax.swing.JMenuItem jMenuItemLoadLSAResults;
  private javax.swing.JMenuItem jMenuItemLoadProject;
  private javax.swing.JMenuItem jMenuItemLogGraphingStats;
  private javax.swing.JMenuItem jMenuItemLogRSGraphingStats;
  private javax.swing.JMenuItem jMenuItemNewProject;
  private javax.swing.JMenuItem jMenuItemPaste;
  private javax.swing.JMenuItem jMenuItemPerformLSA;
  private javax.swing.JMenuItem jMenuItemQuit;
  private javax.swing.JMenuItem jMenuItemSaveLSAResults;
  private javax.swing.JMenuItem jMenuItemSaveProject;
  private javax.swing.JMenuItem jMenuItemShowPreferences;
  private javax.swing.JMenuItem jMenuItemViewDebug;
  private javax.swing.JMenuItem jMenuItemViewDocuments;
  private javax.swing.JMenuItem jMenuItemViewLSAResults;
  private javax.swing.JMenuItem jMenuItemViewVisualResults;
  private javax.swing.JMenu jMenuLSA;
  private javax.swing.JMenu jMenuPreferences;
  private javax.swing.JMenu jMenuView;
  private javax.swing.JSeparator jSeparator;
  private javax.swing.JSeparator jSeparator1;
  private javax.swing.JSeparator jSeparator2;
  private javax.swing.JSeparator jSeparator3;
  // End of variables declaration//GEN-END:variables

  /** @param args the command line arguments */
  public static void main(String args[]) {
    java.awt.EventQueue.invokeLater(
        new Runnable() {
          public void run() {
            new LSATApplication().setVisible(true);
          }
        });
  }
}
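A note on the Load Project and Save Project handlers above: they use plain Java serialization and never close the streams they open, so a failed load or save can leak file handles. Below is a minimal, hypothetical sketch of the same load step rewritten with try-with-resources; it assumes the same Project/LSAResults types and log field as the class above, and that results only follow a project that was written first.

  // Hypothetical helper, not part of the original class: same read order as the handler above.
  private void loadProjectFrom(java.io.File inputFile) {
    try (java.io.ObjectInputStream ois =
        new java.io.ObjectInputStream(new java.io.FileInputStream(inputFile))) {
      this.theProject = (Project) ois.readObject();
      this.currentResults = (LSAResults) ois.readObject(); // absent results surface as EOFException
    } catch (java.io.EOFException eof) {
      log.log(Log.WARNING, "Project file contains no LSA results");
    } catch (java.io.IOException | ClassNotFoundException e) {
      log.log(Log.ERROR, "Failed to load project: " + e.getMessage());
    }
  }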
Example #12
0
/**
 * The Secondary NameNode is a helper to the primary NameNode. The Secondary is responsible for
 * supporting periodic checkpoints of the HDFS metadata. The current design allows only one
 * Secondary NameNode per HDFS cluster.
 *
 * <p>The Secondary NameNode is a daemon that periodically wakes up (determined by the schedule
 * specified in the configuration), triggers a periodic checkpoint and then goes back to sleep. The
 * Secondary NameNode uses the NamenodeProtocol to talk to the primary NameNode.
 */
public class SecondaryNameNode implements Runnable {

  public static final Log LOG = LogFactory.getLog(SecondaryNameNode.class.getName());

  private String fsName;
  private CheckpointStorage checkpointImage;
  private FSNamesystem namesystem;

  private NamenodeProtocol namenode;
  private Configuration conf;
  private InetSocketAddress nameNodeAddr;
  private volatile boolean shouldRun;
  private HttpServer infoServer;
  private int infoPort;
  private String infoBindAddress;

  private Collection<File> checkpointDirs;
  private Collection<File> checkpointEditsDirs;
  private long checkpointPeriod; // in seconds
  private long checkpointSize; // size (in bytes) of the edit log that triggers a checkpoint

  /** Utility class to facilitate junit test error simulation. */
  static class ErrorSimulator {
    private static boolean[] simulation = null; // error simulation events

    static void initializeErrorSimulationEvent(int numberOfEvents) {
      simulation = new boolean[numberOfEvents];
      for (int i = 0; i < numberOfEvents; i++) {
        simulation[i] = false;
      }
    }

    static boolean getErrorSimulation(int index) {
      if (simulation == null) return false;
      assert (index < simulation.length);
      return simulation[index];
    }

    static void setErrorSimulation(int index) {
      assert (index < simulation.length);
      simulation[index] = true;
    }

    static void clearErrorSimulation(int index) {
      assert (index < simulation.length);
      simulation[index] = false;
    }
  }

  FSImage getFSImage() {
    return checkpointImage;
  }

  /** Create a connection to the primary namenode. */
  public SecondaryNameNode(Configuration conf) throws IOException {
    try {
      initialize(conf);
    } catch (IOException e) {
      shutdown();
      throw e;
    }
  }

  /** Initialize SecondaryNameNode. */
  private void initialize(Configuration conf) throws IOException {
    // initiate Java VM metrics
    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getAddress(conf);

    this.conf = conf;
    this.namenode =
        (NamenodeProtocol)
            RPC.waitForProxy(
                NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf);
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    String infoAddr =
        NetUtils.getServerAddress(
            conf,
            "dfs.secondary.info.bindAddress",
            "dfs.secondary.info.port",
            "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", checkpointImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn(
        "Checkpoint Period   :"
            + checkpointPeriod
            + " secs "
            + "("
            + checkpointPeriod / 60
            + " min)");
    LOG.warn(
        "Log Size Trigger    :"
            + checkpointSize
            + " bytes "
            + "("
            + checkpointSize / 1024
            + " KB)");
  }

  /** Shut down this instance of the SecondaryNameNode. Returns only after shutdown is complete. */
  public void shutdown() {
    shouldRun = false;
    try {
      if (infoServer != null) infoServer.stop();
    } catch (Exception e) {
      LOG.warn("Exception shutting down SecondaryNameNode", e);
    }
    try {
      if (checkpointImage != null) checkpointImage.close();
    } catch (IOException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
  }

  //
  // The main work loop
  //
  public void run() {

    //
    // Poll the Namenode (once every 5 minutes) to find the size of the
    // pending edit log.
    //
    long period = 5 * 60; // 5 minutes
    long lastCheckpointTime = 0;
    if (checkpointPeriod < period) {
      period = checkpointPeriod;
    }

    while (shouldRun) {
      try {
        Thread.sleep(1000 * period);
      } catch (InterruptedException ie) {
        // do nothing
      }
      if (!shouldRun) {
        break;
      }
      try {
        long now = System.currentTimeMillis();

        long size = namenode.getEditLogSize();
        if (size >= checkpointSize || now >= lastCheckpointTime + 1000 * checkpointPeriod) {
          doCheckpoint();
          lastCheckpointTime = now;
        }
      } catch (IOException e) {
        LOG.error("Exception in doCheckpoint: ");
        LOG.error(StringUtils.stringifyException(e));
        e.printStackTrace();
        checkpointImage.imageDigest = null;
      } catch (Throwable e) {
        LOG.error("Throwable Exception in doCheckpoint: ");
        LOG.error(StringUtils.stringifyException(e));
        e.printStackTrace();
        Runtime.getRuntime().exit(-1);
      }
    }
  }

  /**
   * Download <code>fsimage</code> and <code>edits</code> files from the name-node.
   *
   * @return true if a new image has been downloaded and needs to be loaded
   * @throws IOException
   */
  private boolean downloadCheckpointFiles(CheckpointSignature sig) throws IOException {

    checkpointImage.cTime = sig.cTime;
    checkpointImage.checkpointTime = sig.checkpointTime;

    boolean downloadImage = true;
    String fileid;
    File[] srcNames;
    if (sig.imageDigest.equals(checkpointImage.imageDigest)) {
      downloadImage = false;
      LOG.info("Image has not changed. Will not download image.");
    } else {
      // get fsimage
      srcNames = checkpointImage.getImageFiles();
      assert srcNames.length > 0 : "No checkpoint targets.";
      fileid = "getimage=1";
      TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
      checkpointImage.imageDigest = sig.imageDigest;
      LOG.info(
          "Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes.");
    }
    // get edits file
    fileid = "getedit=1";
    srcNames = checkpointImage.getEditsFiles();
    assert srcNames.length > 0 : "No checkpoint targets.";
    TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
    LOG.info(
        "Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes.");

    checkpointImage.checkpointUploadDone(null);

    return downloadImage;
  }

  /** Copy the new fsimage into the NameNode */
  private void putFSImage(CheckpointSignature sig) throws IOException {
    String fileid =
        "putimage=1&port="
            + infoPort
            + "&machine="
            + InetAddress.getLocalHost().getHostAddress()
            + "&token="
            + sig.toString();
    LOG.info("Posted URL " + fsName + fileid);
    TransferFsImage.getFileClient(fsName, fileid, (File[]) null, false);
  }

  /** Returns the address (host:port) of the NameNode's HTTP (Jetty) server. */
  private String getInfoServer() throws IOException {
    URI fsName = FileSystem.getDefaultUri(conf);
    if (!"hdfs".equals(fsName.getScheme())) {
      throw new IOException("This is not a DFS");
    }
    return NetUtils.getServerAddress(
        conf, "dfs.info.bindAddress", "dfs.info.port", "dfs.http.address");
  }

  /** Create a new checkpoint */
  void doCheckpoint() throws IOException {

    LOG.info("Checkpoint starting");

    // Do the required initialization of the merge work area.
    startCheckpoint();

    // Tell the namenode to start logging transactions in a new edit file
    // Returns a token that would be used to upload the merged image.
    CheckpointSignature sig = (CheckpointSignature) namenode.rollEditLog();

    // error simulation code for junit test
    if (ErrorSimulator.getErrorSimulation(0)) {
      throw new IOException("Simulating error0 " + "after creating edits.new");
    }

    boolean loadImage = downloadCheckpointFiles(sig); // Fetch fsimage and edits
    doMerge(sig, loadImage); // Do the merge

    //
    // Upload the new image into the NameNode. Then tell the Namenode
    // to make this new uploaded image as the most current image.
    //
    putFSImage(sig);

    // error simulation code for junit test
    if (ErrorSimulator.getErrorSimulation(1)) {
      throw new IOException("Simulating error1 " + "after uploading new image to NameNode");
    }

    namenode.rollFsImage(new CheckpointSignature(checkpointImage));
    checkpointImage.endCheckpoint();

    LOG.info("Checkpoint done. New Image Size: " + checkpointImage.getFsImageName().length());
  }

  private void startCheckpoint() throws IOException {
    checkpointImage.unlockAll();
    checkpointImage.getEditLog().close();
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
    checkpointImage.startCheckpoint();
  }

  /** Merge downloaded image and edits and write the new image into current storage directory. */
  private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException {
    if (loadImage) { // create an empty namespace if new image
      namesystem = new FSNamesystem(checkpointImage, conf);
    }
    assert namesystem.dir.fsImage == checkpointImage;
    checkpointImage.doMerge(sig, loadImage);
  }

  /**
   * @param argv The parameters passed to this program.
   * @exception Exception if the filesystem does not exist.
   * @return 0 on success, non zero on error.
   */
  private int processArgs(String[] argv) throws Exception {

    if (argv.length < 1) {
      printUsage("");
      return -1;
    }

    int exitCode = -1;
    int i = 0;
    String cmd = argv[i++];

    //
    // verify that we have enough command line parameters
    //
    if ("-geteditsize".equals(cmd)) {
      if (argv.length != 1) {
        printUsage(cmd);
        return exitCode;
      }
    } else if ("-checkpoint".equals(cmd)) {
      if (argv.length != 1 && argv.length != 2) {
        printUsage(cmd);
        return exitCode;
      }
      if (argv.length == 2 && !"force".equals(argv[i])) {
        printUsage(cmd);
        return exitCode;
      }
    }

    exitCode = 0;
    try {
      if ("-checkpoint".equals(cmd)) {
        long size = namenode.getEditLogSize();
        if (size >= checkpointSize || argv.length == 2 && "force".equals(argv[i])) {
          doCheckpoint();
        } else {
          System.err.println(
              "EditLog size "
                  + size
                  + " bytes is "
                  + "smaller than configured checkpoint "
                  + "size "
                  + checkpointSize
                  + " bytes.");
          System.err.println("Skipping checkpoint.");
        }
      } else if ("-geteditsize".equals(cmd)) {
        long size = namenode.getEditLogSize();
        System.out.println("EditLog size is " + size + " bytes");
      } else {
        exitCode = -1;
        LOG.error(cmd.substring(1) + ": Unknown command");
        printUsage("");
      }
    } catch (RemoteException e) {
      //
      // This is an error returned by the hadoop server. Print
      // out the first line of the error message, ignore the stack trace.
      exitCode = -1;
      try {
        String[] content;
        content = e.getLocalizedMessage().split("\n");
        LOG.error(cmd.substring(1) + ": " + content[0]);
      } catch (Exception ex) {
        LOG.error(cmd.substring(1) + ": " + ex.getLocalizedMessage());
      }
    } catch (IOException e) {
      //
      // IO exception encountered locally.
      //
      exitCode = -1;
      LOG.error(cmd.substring(1) + ": " + e.getLocalizedMessage());
    } finally {
      // Does the RPC connection need to be closed?
    }
    return exitCode;
  }

  /**
   * Displays format of commands.
   *
   * @param cmd The command that is being executed.
   */
  private void printUsage(String cmd) {
    if ("-geteditsize".equals(cmd)) {
      System.err.println("Usage: java SecondaryNameNode" + " [-geteditsize]");
    } else if ("-checkpoint".equals(cmd)) {
      System.err.println("Usage: java SecondaryNameNode" + " [-checkpoint [force]]");
    } else {
      System.err.println(
          "Usage: java SecondaryNameNode " + "[-checkpoint [force]] " + "[-geteditsize] ");
    }
  }

  /**
   * main() has some simple utility methods.
   *
   * @param argv Command line parameters.
   * @exception Exception if the filesystem does not exist.
   */
  public static void main(String[] argv) throws Exception {
    StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
    Configuration tconf = new Configuration();
    if (argv.length >= 1) {
      SecondaryNameNode secondary = new SecondaryNameNode(tconf);
      int ret = secondary.processArgs(argv);
      System.exit(ret);
    }

    // Create a never-ending daemon
    Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
    checkpointThread.start();
  }

  static class CheckpointStorage extends FSImage {
    /** */
    CheckpointStorage(Configuration conf) throws IOException {
      super(conf);
    }

    @Override
    public boolean isConversionNeeded(StorageDirectory sd) {
      return false;
    }

    /**
     * Analyze checkpoint directories. Create directories if they do not exist. Recover from an
     * unsuccessful checkpoint if necessary.
     *
     * @param dataDirs
     * @param editsDirs
     * @throws IOException
     */
    void recoverCreate(Collection<File> dataDirs, Collection<File> editsDirs) throws IOException {
      Collection<File> tempDataDirs = new ArrayList<File>(dataDirs);
      Collection<File> tempEditsDirs = new ArrayList<File>(editsDirs);
      this.storageDirs = new ArrayList<StorageDirectory>();
      setStorageDirectories(tempDataDirs, tempEditsDirs);
      for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        boolean isAccessible = true;
        try { // create directories if they don't exist yet
          if (!sd.getRoot().mkdirs()) {
            // do nothing, the directory already exists
          }
        } catch (SecurityException se) {
          isAccessible = false;
        }
        if (!isAccessible)
          throw new InconsistentFSStateException(
              sd.getRoot(), "cannot access checkpoint directory.");
        StorageState curState;
        try {
          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
          // sd is locked but not opened
          switch (curState) {
            case NON_EXISTENT:
              // fail if any of the configured checkpoint dirs are inaccessible
              throw new InconsistentFSStateException(
                  sd.getRoot(), "checkpoint directory does not exist or is not accessible.");
            case NOT_FORMATTED:
              break; // it's ok since initially there is no current and VERSION
            case NORMAL:
              break;
            default: // recovery is possible
              sd.doRecover(curState);
          }
        } catch (IOException ioe) {
          sd.unlock();
          throw ioe;
        }
      }
    }

    /**
     * Prepare directories for a new checkpoint.
     *
     * <p>Rename <code>current</code> to <code>lastcheckpoint.tmp</code> and recreate
     * <code>current</code>.
     *
     * @throws IOException
     */
    void startCheckpoint() throws IOException {
      for (StorageDirectory sd : storageDirs) {
        moveCurrent(sd);
      }
    }

    void endCheckpoint() throws IOException {
      for (StorageDirectory sd : storageDirs) {
        moveLastCheckpoint(sd);
      }
    }

    /** Merge image and edits, and verify consistency with the signature. */
    private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException {
      getEditLog().open();
      StorageDirectory sdName = null;
      StorageDirectory sdEdits = null;
      Iterator<StorageDirectory> it = null;
      if (loadImage) {
        it = dirIterator(NameNodeDirType.IMAGE);
        if (it.hasNext()) sdName = it.next();
        if (sdName == null) throw new IOException("Could not locate checkpoint fsimage");
      }
      it = dirIterator(NameNodeDirType.EDITS);
      if (it.hasNext()) sdEdits = it.next();
      if (sdEdits == null) throw new IOException("Could not locate checkpoint edits");
      if (loadImage) {
        loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE));
      }
      loadFSEdits(sdEdits);
      sig.validateStorageInfo(this);
      saveNamespace(false);
    }
  }
}
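The run() loop above decides when to checkpoint by comparing the pending edit log size against fs.checkpoint.size (bytes) and the time since the last checkpoint against fs.checkpoint.period (seconds). The fragment below is an illustrative restatement of that trigger condition only; the method name is an assumption, not part of the Hadoop API.

  // Illustrative sketch of the checkpoint trigger used in SecondaryNameNode.run().
  static boolean shouldCheckpoint(
      long editLogSizeBytes,
      long checkpointSizeBytes,
      long nowMillis,
      long lastCheckpointMillis,
      long checkpointPeriodSeconds) {
    boolean bySize = editLogSizeBytes >= checkpointSizeBytes;
    boolean byTime = nowMillis >= lastCheckpointMillis + 1000L * checkpointPeriodSeconds;
    return bySize || byTime;
  }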
Example #13
0
/** This is <em>not</em> reusable. */
@SuppressWarnings("deprecation")
public class BSONWritable implements BSONObject, WritableComparable {

  /** Constructs a new instance. */
  public BSONWritable() {
    _doc = new BasicBSONObject();
  }

  /**
   * Copy constructor, copies data from an existing BSONWritable
   *
   * @param other The BSONWritable to copy from
   */
  public BSONWritable(BSONWritable other) {
    this();
    copy(other);
  }

  /** Constructs a new instance around an existing BSONObject */
  public BSONWritable(BSONObject doc) {
    this();
    putAll(doc);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#put(String,Object)
   */
  public Object put(String key, Object value) {
    return _doc.put(key, value);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#putAll(BSONObject)
   */
  public void putAll(BSONObject otherDoc) {
    _doc.putAll(otherDoc);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#putAll(Map)
   */
  public void putAll(Map otherMap) {
    _doc.putAll(otherMap);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#get(String)
   */
  public Object get(String key) {
    return _doc.get(key);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#toMap()
   */
  public Map toMap() {
    return _doc.toMap();
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#removeField(String)
   */
  public Object removeField(String key) {
    return _doc.removeField(key);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#containsKey(String)
   */
  public boolean containsKey(String key) {
    return _doc.containsKey(key);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#containsField(String)
   */
  public boolean containsField(String fieldName) {
    return _doc.containsField(fieldName);
  }

  /**
   * {@inheritDoc}
   *
   * @see BSONObject#keySet()
   */
  public Set<java.lang.String> keySet() {
    return _doc.keySet();
  }

  /**
   * {@inheritDoc}
   *
   * @see Writable#write(DataOutput)
   */
  public void write(DataOutput out) throws IOException {
    BSONEncoder enc = new BSONEncoder();
    BasicOutputBuffer buf = new BasicOutputBuffer();
    enc.set(buf);
    enc.putObject(_doc);
    enc.done();
    out.writeInt(buf.size());
    // For better performance we could copy BasicOutputBuffer.pipe(OutputStream) into a
    // variant that accepts a DataOutput, avoiding the extra toByteArray() copy below.
    out.write(buf.toByteArray());
  }

  /**
   * {@inheritDoc}
   *
   * @see Writable#readFields(DataInput)
   */
  public void readFields(DataInput in) throws IOException {
    BSONDecoder dec = new BSONDecoder();
    BSONCallback cb = new BasicBSONCallback();
    // Read the BSON length from the start of the record
    int dataLen = in.readInt();
    byte[] buf = new byte[dataLen];
    in.readFully(buf);
    dec.decode(buf, cb);
    _doc = (BSONObject) cb.get();
    log.info("Decoded a BSON Object: " + _doc);
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    BSONEncoder enc = new BSONEncoder();
    BasicOutputBuffer buf = new BasicOutputBuffer();
    enc.set(buf);
    enc.putObject(_doc);
    enc.done();
    String str = buf.asString();
    log.debug("Output As String: '" + str + "'");
    return str;
  }

  /** Used by child copy constructors. */
  protected synchronized void copy(Writable other) {
    if (other != null) {
      try {
        DataOutputBuffer out = new DataOutputBuffer();
        other.write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        readFields(in);

      } catch (IOException e) {
        throw new IllegalArgumentException("map cannot be copied: " + e.getMessage());
      }

    } else {
      throw new IllegalArgumentException("source map cannot be null");
    }
  }

  public static class Comparator extends WritableComparator {
    public Comparator() {
      super(BSONWritable.class);
    }

    public int compare(WritableComparable a, WritableComparable b) {

      if (a instanceof BSONWritable && b instanceof BSONWritable) {
        return ((BSONWritable) a)._doc.toString().compareTo(((BSONWritable) b)._doc.toString());
      } else {
        return -1;
      }
    }
  }

  static { // register this comparator
    WritableComparator.define(BSONWritable.class, new Comparator());
  }

  @Override
  public int compareTo(Object o) {
    return new Comparator().compare(this, o);
  }

  protected BSONObject _doc;

  private static final Log log = LogFactory.getLog(BSONWritable.class);
}
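write() and readFields() above frame each document as a 4-byte length followed by the raw BSON bytes, which is exactly what the copy() helper relies on. The sketch below round-trips a BSONWritable through Hadoop's DataOutputBuffer and DataInputBuffer; it is a hypothetical usage example and assumes the same mongo-hadoop and Hadoop classes shown above are available.

  // Hypothetical usage sketch: serialize one BSONWritable and read it back.
  public static BSONWritable roundTrip(BSONWritable original) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out); // writes the length prefix followed by the BSON payload

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength()); // read back the bytes just written

    BSONWritable copy = new BSONWritable();
    copy.readFields(in);
    return copy;
  }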
Example #14
0
/**
 * Base class that runs a task in a separate process. Tasks are run in a separate process in order
 * to isolate the map/reduce system code from bugs in user supplied map and reduce functions.
 */
abstract class TaskRunner extends Thread {
  public static final Log LOG = LogFactory.getLog(TaskRunner.class);

  volatile boolean killed = false;
  private TaskTracker.TaskInProgress tip;
  private Task t;
  private Object lock = new Object();
  private volatile boolean done = false;
  private int exitCode = -1;
  private boolean exitCodeSet = false;

  private TaskTracker tracker;

  protected JobConf conf;
  JvmManager jvmManager;

  /** for cleaning up old map outputs */
  protected MapOutputFile mapOutputFile;

  public TaskRunner(TaskTracker.TaskInProgress tip, TaskTracker tracker, JobConf conf) {
    this.tip = tip;
    this.t = tip.getTask();
    this.tracker = tracker;
    this.conf = conf;
    this.mapOutputFile = new MapOutputFile(t.getJobID());
    this.mapOutputFile.setConf(conf);
    this.jvmManager = tracker.getJvmManagerInstance();
  }

  public Task getTask() {
    return t;
  }

  public TaskTracker.TaskInProgress getTaskInProgress() {
    return tip;
  }

  public TaskTracker getTracker() {
    return tracker;
  }

  /**
   * Called to assemble this task's input. This method is run in the parent process before the child
   * is spawned. It should not execute user code, only system code.
   */
  public boolean prepare() throws IOException {
    return true;
  }

  /**
   * Called when this task's output is no longer needed. This method is run in the parent process
   * after the child exits. It should not execute user code, only system code.
   */
  public void close() throws IOException {}

  private static String stringifyPathArray(Path[] p) {
    if (p == null) {
      return null;
    }
    StringBuffer str = new StringBuffer(p[0].toString());
    for (int i = 1; i < p.length; i++) {
      str.append(",");
      str.append(p[i].toString());
    }
    return str.toString();
  }

  @Override
  public final void run() {
    try {

      // before preparing the job localize
      // all the archives
      TaskAttemptID taskid = t.getTaskID();
      LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
      File jobCacheDir = null;
      if (conf.getJar() != null) {
        jobCacheDir = new File(new Path(conf.getJar()).getParent().toString());
      }
      File workDir =
          new File(
              lDirAlloc
                  .getLocalPathToRead(
                      TaskTracker.getJobCacheSubdir()
                          + Path.SEPARATOR
                          + t.getJobID()
                          + Path.SEPARATOR
                          + t.getTaskID()
                          + Path.SEPARATOR
                          + MRConstants.WORKDIR,
                      conf)
                  .toString());

      URI[] archives = DistributedCache.getCacheArchives(conf);
      URI[] files = DistributedCache.getCacheFiles(conf);
      FileStatus fileStatus;
      FileSystem fileSystem;
      Path localPath;
      String baseDir;

      if ((archives != null) || (files != null)) {
        if (archives != null) {
          String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf);
          Path[] p = new Path[archives.length];
          for (int i = 0; i < archives.length; i++) {
            fileSystem = FileSystem.get(archives[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(archives[i].getPath()));
            String cacheId = DistributedCache.makeRelative(archives[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    archives[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    true,
                    Long.parseLong(archivesTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalArchives(conf, stringifyPathArray(p));
        }
        if ((files != null)) {
          String[] fileTimestamps = DistributedCache.getFileTimestamps(conf);
          Path[] p = new Path[files.length];
          for (int i = 0; i < files.length; i++) {
            fileSystem = FileSystem.get(files[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(files[i].getPath()));
            String cacheId = DistributedCache.makeRelative(files[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    files[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    false,
                    Long.parseLong(fileTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalFiles(conf, stringifyPathArray(p));
        }
        Path localTaskFile = new Path(t.getJobFile());
        FileSystem localFs = FileSystem.getLocal(conf);
        localFs.delete(localTaskFile, true);
        OutputStream out = localFs.create(localTaskFile);
        try {
          conf.writeXml(out);
        } finally {
          out.close();
        }
      }

      if (!prepare()) {
        return;
      }

      String sep = System.getProperty("path.separator");
      StringBuffer classPath = new StringBuffer();
      // start with same classpath as parent process
      classPath.append(System.getProperty("java.class.path"));
      classPath.append(sep);
      if (!workDir.mkdirs()) {
        if (!workDir.isDirectory()) {
          LOG.fatal("Mkdirs failed to create " + workDir.toString());
        }
      }

      String jar = conf.getJar();
      if (jar != null) {
        // if a job jar exists, add its unpacked lib/, classes/ and root dir to the classpath
        File[] libs = new File(jobCacheDir, "lib").listFiles();
        if (libs != null) {
          for (int i = 0; i < libs.length; i++) {
            classPath.append(sep); // add libs from jar to classpath
            classPath.append(libs[i]);
          }
        }
        classPath.append(sep);
        classPath.append(new File(jobCacheDir, "classes"));
        classPath.append(sep);
        classPath.append(jobCacheDir);
      }

      // include the user specified classpath

      // archive paths
      Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf);
      if (archiveClasspaths != null && archives != null) {
        Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
        if (localArchives != null) {
          for (int i = 0; i < archives.length; i++) {
            for (int j = 0; j < archiveClasspaths.length; j++) {
              if (archives[i].getPath().equals(archiveClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localArchives[i].toString());
              }
            }
          }
        }
      }
      // file paths
      Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf);
      if (fileClasspaths != null && files != null) {
        Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
        if (localFiles != null) {
          for (int i = 0; i < files.length; i++) {
            for (int j = 0; j < fileClasspaths.length; j++) {
              if (files[i].getPath().equals(fileClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localFiles[i].toString());
              }
            }
          }
        }
      }

      classPath.append(sep);
      classPath.append(workDir);
      // Build the exec'd child JVM args.
      Vector<String> vargs = new Vector<String>(8);
      File jvm = // use same jvm as parent
          new File(new File(System.getProperty("java.home"), "bin"), "java");

      vargs.add(jvm.toString());

      // Add child (task) java-vm options.
      //
      // The following symbols if present in mapred.child.java.opts value are
      // replaced:
      // + @taskid@ is interpolated with value of TaskID.
      // Other occurrences of @ will not be altered.
      //
      // Example with multiple arguments and substitutions, showing
      // jvm GC logging, and start of a passwordless JVM JMX agent so can
      // connect with jconsole and the likes to watch child memory, threads
      // and get thread dumps.
      //
      //  <property>
      //    <name>mapred.child.java.opts</name>
      //    <value>-verbose:gc -Xloggc:/tmp/@taskid@.gc \
      //           -Dcom.sun.management.jmxremote.authenticate=false \
      //           -Dcom.sun.management.jmxremote.ssl=false \
      //    </value>
      //  </property>
      //
      String javaOpts = conf.get("mapred.child.java.opts", "-Xmx200m");
      javaOpts = javaOpts.replace("@taskid@", taskid.toString());
      String[] javaOptsSplit = javaOpts.split(" ");

      // Add java.library.path; necessary for loading native libraries.
      //
      // 1. To support native-hadoop library i.e. libhadoop.so, we add the
      //    parent processes' java.library.path to the child.
      // 2. We also add the 'cwd' of the task to its java.library.path to help
      //    users distribute native libraries via the DistributedCache.
      // 3. The user can also specify extra paths to be added to the
      //    java.library.path via mapred.child.java.opts.
      //
      String libraryPath = System.getProperty("java.library.path");
      if (libraryPath == null) {
        libraryPath = workDir.getAbsolutePath();
      } else {
        libraryPath += sep + workDir;
      }
      boolean hasUserLDPath = false;
      for (int i = 0; i < javaOptsSplit.length; i++) {
        if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
          javaOptsSplit[i] += sep + libraryPath;
          hasUserLDPath = true;
          break;
        }
      }
      if (!hasUserLDPath) {
        vargs.add("-Djava.library.path=" + libraryPath);
      }
      for (int i = 0; i < javaOptsSplit.length; i++) {
        vargs.add(javaOptsSplit[i]);
      }

      // add java.io.tmpdir given by mapred.child.tmp
      String tmp = conf.get("mapred.child.tmp", "./tmp");
      Path tmpDir = new Path(tmp);

      // if temp directory path is not absolute
      // prepend it with workDir.
      if (!tmpDir.isAbsolute()) {
        tmpDir = new Path(workDir.toString(), tmp);
      }
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
      vargs.add("-Djava.io.tmpdir=" + tmpDir.toString());

      // Add classpath.
      vargs.add("-classpath");
      vargs.add(classPath.toString());

      // Setup the log4j prop
      long logSize = TaskLog.getTaskLogLength(conf);
      vargs.add(
          "-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir")).getAbsolutePath());
      vargs.add("-Dhadoop.root.logger=INFO,TLA");
      vargs.add("-Dhadoop.tasklog.taskid=" + taskid);
      vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize);

      if (conf.getProfileEnabled()) {
        if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) {
          File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE);
          vargs.add(String.format(conf.getProfileParams(), prof.toString()));
        }
      }

      // Add main class and its arguments
      vargs.add(Child.class.getName()); // main of Child
      // pass umbilical address
      InetSocketAddress address = tracker.getTaskTrackerReportAddress();
      vargs.add(address.getAddress().getHostAddress());
      vargs.add(Integer.toString(address.getPort()));
      vargs.add(taskid.toString()); // pass task identifier

      String pidFile = null;
      if (tracker.isTaskMemoryManagerEnabled()) {
        pidFile =
            lDirAlloc
                .getLocalPathForWrite(
                    (TaskTracker.getPidFilesSubdir() + Path.SEPARATOR + taskid), this.conf)
                .toString();
      }

      // set memory limit using ulimit if feasible and necessary ...
      String[] ulimitCmd = Shell.getUlimitMemoryCommand(conf);
      List<String> setup = null;
      if (ulimitCmd != null) {
        setup = new ArrayList<String>();
        for (String arg : ulimitCmd) {
          setup.add(arg);
        }
      }

      // Set up the redirection of the task's stdout and stderr streams
      File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
      File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
      stdout.getParentFile().mkdirs();
      tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr);

      Map<String, String> env = new HashMap<String, String>();
      StringBuffer ldLibraryPath = new StringBuffer();
      ldLibraryPath.append(workDir.toString());
      String oldLdLibraryPath = null;
      oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH");
      if (oldLdLibraryPath != null) {
        ldLibraryPath.append(sep);
        ldLibraryPath.append(oldLdLibraryPath);
      }
      env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
      jvmManager.launchJvm(
          this,
          jvmManager.constructJvmEnv(
              setup, vargs, stdout, stderr, logSize, workDir, env, pidFile, conf));
      synchronized (lock) {
        while (!done) {
          lock.wait();
        }
      }
      tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
      if (exitCodeSet) {
        if (!killed && exitCode != 0) {
          if (exitCode == 65) {
            tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
          }
          throw new IOException("Task process exit with nonzero status of " + exitCode + ".");
        }
      }
    } catch (FSError e) {
      LOG.fatal("FSError", e);
      try {
        tracker.fsError(t.getTaskID(), e.getMessage());
      } catch (IOException ie) {
        LOG.fatal(t.getTaskID() + " reporting FSError", ie);
      }
    } catch (Throwable throwable) {
      LOG.warn(t.getTaskID() + " Child Error", throwable);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      throwable.printStackTrace(new PrintStream(baos));
      try {
        tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString());
      } catch (IOException e) {
        LOG.warn(t.getTaskID() + " Reporting Diagnostics", e);
      }
    } finally {
      try {
        URI[] archives = DistributedCache.getCacheArchives(conf);
        URI[] files = DistributedCache.getCacheFiles(conf);
        if (archives != null) {
          for (int i = 0; i < archives.length; i++) {
            DistributedCache.releaseCache(archives[i], conf);
          }
        }
        if (files != null) {
          for (int i = 0; i < files.length; i++) {
            DistributedCache.releaseCache(files[i], conf);
          }
        }
      } catch (IOException ie) {
        LOG.warn("Error releasing caches : Cache files might not have been cleaned up");
      }
      tracker.reportTaskFinished(t.getTaskID(), false);
      if (t.isMapTask()) {
        tracker.addFreeMapSlot();
      } else {
        tracker.addFreeReduceSlot();
      }
    }
  }

  // Mostly for setting up the symlinks. Note that when we set up the distributed
  // cache, we don't create the symlinks; that is done on a per-task basis by the
  // currently executing task.
  public static void setupWorkDir(JobConf conf) throws IOException {
    File workDir = new File(".").getAbsoluteFile();
    FileUtil.fullyDelete(workDir);
    if (DistributedCache.getSymlink(conf)) {
      URI[] archives = DistributedCache.getCacheArchives(conf);
      URI[] files = DistributedCache.getCacheFiles(conf);
      Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
      Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
      if (archives != null) {
        for (int i = 0; i < archives.length; i++) {
          String link = archives[i].getFragment();
          if (link != null) {
            link = workDir.toString() + Path.SEPARATOR + link;
            File flink = new File(link);
            if (!flink.exists()) {
              FileUtil.symLink(localArchives[i].toString(), link);
            }
          }
        }
      }
      if (files != null) {
        for (int i = 0; i < files.length; i++) {
          String link = files[i].getFragment();
          if (link != null) {
            link = workDir.toString() + Path.SEPARATOR + link;
            File flink = new File(link);
            if (!flink.exists()) {
              FileUtil.symLink(localFiles[i].toString(), link);
            }
          }
        }
      }
    }
    File jobCacheDir = null;
    if (conf.getJar() != null) {
      jobCacheDir = new File(new Path(conf.getJar()).getParent().toString());
    }

    // create symlinks for all the files in the job cache dir in the current
    // working dir for streaming
    try {
      DistributedCache.createAllSymlink(conf, jobCacheDir, workDir);
    } catch (IOException ie) {
      // Do not exit even if symlinks have not been created.
      LOG.warn(StringUtils.stringifyException(ie));
    }
    // add java.io.tmpdir given by mapred.child.tmp
    String tmp = conf.get("mapred.child.tmp", "./tmp");
    Path tmpDir = new Path(tmp);

    // if temp directory path is not absolute
    // prepend it with workDir.
    if (!tmpDir.isAbsolute()) {
      tmpDir = new Path(workDir.toString(), tmp);
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
    }
  }

  /** Kill the child process */
  public void kill() {
    killed = true;
    jvmManager.taskKilled(this);
    signalDone();
  }

  public void signalDone() {
    synchronized (lock) {
      done = true;
      lock.notify();
    }
  }

  public void setExitCode(int exitCode) {
    this.exitCodeSet = true;
    this.exitCode = exitCode;
  }
}
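
The launch path above hands the child JVM to jvmManager and then blocks on a private lock until signalDone() flips the done flag; kill() reuses the same handshake. A minimal, self-contained sketch of that wait/notify completion pattern (the class and method names here are illustrative, not taken from the code above):

public class CompletionLatchSketch {
  private final Object lock = new Object();
  private boolean done = false;
  private boolean exitCodeSet = false;
  private int exitCode;

  // Called by whoever owns the child process once it has finished.
  public void signalDone(int code) {
    synchronized (lock) {
      exitCode = code;
      exitCodeSet = true;
      done = true;
      lock.notifyAll(); // notify() also works when there is exactly one waiter
    }
  }

  // Called by the launching thread; blocks until signalDone() has run.
  public int awaitExit() throws InterruptedException {
    synchronized (lock) {
      while (!done) { // loop guards against spurious wakeups
        lock.wait();
      }
      return exitCodeSet ? exitCode : 0;
    }
  }
}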
/**
 * Created with IntelliJ IDEA. User: ciobi Date: 2013-06-15 Time: 09:56
 */
public class ReaderHandler extends WebAppContext {

  public static final Log LOG = LogFactory.getLog(ReaderHandler.class);
  // ttt1 option that on http only redirects to https, for all paths

  public static final String ACTION_LOGIN = "login";
  public static final String ACTION_SIGNUP = "signup";
  public static final String ACTION_CHANGE_PASSWORD = "change_password";
  public static final String ACTION_CHANGE_SETTINGS = "change_settings";
  public static final String ACTION_ADD_FEED = "add_feed";
  public static final String ACTION_REMOVE_FEED = "remove_feed";
  public static final String ACTION_UPDATE_FEED_LIST = "update_feed_list"; // for ordering, //ttt2

  public static final String PATH_LOGIN = "/" + ACTION_LOGIN;
  public static final String PATH_CHANGE_PASSWORD = "/" + ACTION_CHANGE_PASSWORD;
  public static final String PATH_CHANGE_SETTINGS = "/" + ACTION_CHANGE_SETTINGS;
  public static final String PATH_SIGNUP = "/" + ACTION_SIGNUP;
  public static final String PATH_ADD_FEED = "/" + ACTION_ADD_FEED;
  public static final String PATH_REMOVE_FEED = "/" + ACTION_REMOVE_FEED;
  public static final String PATH_UPDATE_FEED_LIST = "/" + ACTION_UPDATE_FEED_LIST;
  public static final String PATH_ERROR = "/error";
  public static final String PATH_LOGOUT = "/logout";
  public static final String PATH_SETTINGS = "/settings";
  public static final String PATH_FEEDS = "/feeds";
  public static final String PATH_FEED = "/feed";
  public static final String PATH_ADMIN = "/admin";
  public static final String PATH_FEED_ADMIN = "/feed_admin";
  public static final String PATH_OPEN_ARTICLE =
      "/open_article/"; // !!! it's easier to end this one with a slash

  // params we use to send strings to the JSPs or to get user input in POST, via
  // request.getParameter(), or both
  public static final String PARAM_USER_ID = "userId";
  public static final String PARAM_USER_NAME = "name";
  public static final String PARAM_EMAIL = "email";
  public static final String PARAM_CURRENT_PASSWORD = "currentPassword";
  public static final String PARAM_PASSWORD = "password";
  public static final String PARAM_PASSWORD_CONFIRM = "passwordConfirm";
  public static final String PARAM_PATH = "path";
  // public static final String PARAM_ERROR = "error";
  public static final String PARAM_REMEMBER_ACCOUNT = "rememberAccount";
  public static final String PARAM_NEW_FEED_URL = "feedUrl";
  public static final String PARAM_FEED_ID = "feedId";
  public static final String PARAM_ITEMS_PER_PAGE = "itemsPerPage";
  public static final String PARAM_STYLE = "style";
  public static final String PARAM_FEED_DATE_FORMAT = "feedDateFormat";

  // variable names, used to give JSPs access to Java objects in the handler via
  // request.getAttribute()
  public static final String VAR_USER = "user";
  public static final String VAR_LOGIN_INFO = "loginInfo";
  public static final String VAR_USER_DB = "userDb";
  public static final String VAR_FEED_DB = "feedDb";
  public static final String VAR_ARTICLE_DB = "articleDb";
  public static final String VAR_READ_ARTICLES_COLL_DB = "readArticlesCollDb";

  public static final String BROWSER_ID = "browserId";
  public static final String SESSION_ID = "sessionId";

  private LoginInfo.DB loginInfoDb;
  private User.DB userDb;
  private Feed.DB feedDb;
  private Article.DB articleDb;
  private ReadArticlesColl.DB readArticlesCollDb;

  private UserHelpers userHelpers;

  private boolean isInJar = Utils.isInJar();

  private static class ReaderErrorHandler extends ErrorHandler {
    @Override // !!! note that this gets called for missing pages, but not if exceptions are thrown;
              // exceptions are handled separately
    public void handle(
        String target,
        Request request,
        HttpServletRequest httpServletRequest,
        HttpServletResponse httpServletResponse)
        throws IOException {
      request.setHandled(true);
      httpServletResponse
          .getWriter()
          .println(
              String.format("<h1>Page doesn't exist: %s</h1>", request.getUri().getDecodedPath()));
    }
  }

  private static final HashMap<String, String> PATH_MAPPING = new HashMap<>();

  static {
    PATH_MAPPING.put("", "home_page");
    PATH_MAPPING.put(PATH_LOGIN, "login");
    PATH_MAPPING.put(PATH_LOGOUT, "login"); // !!! after logout we get redirected to /login
    PATH_MAPPING.put(PATH_SIGNUP, "signup");
    PATH_MAPPING.put(PATH_ERROR, "error");
    PATH_MAPPING.put(PATH_FEED_ADMIN, "feed_admin");
    PATH_MAPPING.put(PATH_SETTINGS, "settings");
    PATH_MAPPING.put(PATH_FEEDS, "feeds");
    PATH_MAPPING.put(PATH_FEED + "/*", "feed");
    PATH_MAPPING.put(PATH_ADMIN, "admin");
  }

  public ReaderHandler(LowLevelDbAccess lowLevelDbAccess, String webDir) {

    loginInfoDb = new LoginInfo.DB(lowLevelDbAccess);
    userDb = new User.DB(lowLevelDbAccess);
    feedDb = new Feed.DB(lowLevelDbAccess);
    articleDb = new Article.DB(lowLevelDbAccess);
    readArticlesCollDb = new ReadArticlesColl.DB(lowLevelDbAccess);
    userHelpers = new UserHelpers(loginInfoDb, userDb);

    setContextPath("/");

    File warPath = new File(webDir);
    setWar(warPath.getAbsolutePath());

    if (isInJar) {
      for (Map.Entry<String, String> entry : PATH_MAPPING.entrySet()) {
        addPrebuiltJsp(entry.getKey(), "jsp." + entry.getValue().replaceAll("_", "_005f") + "_jsp");
      }
    } else {
      for (Map.Entry<String, String> entry : PATH_MAPPING.entrySet()) {
        addServlet(
            new ServletHolder(new RedirectServlet("/" + entry.getValue() + ".jsp")),
            entry.getKey());
      }
    }

    setErrorHandler(new ReaderErrorHandler());
  }

  private void addPrebuiltJsp(String path, String className) {
    try {
      // ttt2 see if possible to not use this, preferably without doing redirections like
      // RedirectServlet
      Class<?> clazz = Class.forName(className);
      Object obj = clazz.newInstance();
      addServlet(new ServletHolder((Servlet) obj), path);
      LOG.info("Added prebuilt JSP: " + obj.toString());
    } catch (Exception e) {
      LOG.fatal(String.format("Failed to load prebuilt JSP for %s and %s", path, className), e);
    }
  }

  @Override
  public void doHandle(
      String target,
      Request request,
      HttpServletRequest httpServletRequest,
      HttpServletResponse httpServletResponse)
      throws IOException, ServletException {

    LOG.info("handling " + target);

    // !!! doHandle() is called twice for a request when using redirection: first with
    // request.getPathInfo() set to the URI and target set to the path, then with
    // request.getPathInfo() set to null and target set to the .jsp
    try {
      // request.setHandled(true);
      boolean secured;
      if (request.getScheme().equals("https")) {
        secured = true;
      } else if (request.getScheme().equals("http")) {
        secured = false;
      } else {
        httpServletResponse
            .getWriter()
            .println(
                String.format(
                    "<h1>Unknown scheme %s at %s</h1>",
                    request.getScheme(), request.getUri().getDecodedPath()));
        return;
      }

      if (request.getMethod().equals("GET")) {
        if (isInJar || target.endsWith(".jsp")) {
          // !!! when not in jar there's no need to do anything about params if it's not a .jsp,
          // as this will get called again for the corresponding .jsp
          if (prepareForJspGet(target, request, httpServletResponse, secured)) {
            return;
          }
        }
        if (target.startsWith(PATH_OPEN_ARTICLE)) {
          handleOpenArticle(request, httpServletResponse, target);
          return;
        }
        super.doHandle(target, request, httpServletRequest, httpServletResponse);
        LOG.info("handling of " + target + " went to super");

        // httpServletResponse.setDateHeader("Date", System.currentTimeMillis());     //ttt2 review
        // these, probably not use
        // httpServletResponse.setDateHeader("Expires", System.currentTimeMillis() + 60000);

        return;
      }

      if (request.getMethod().equals("POST")) {
        if (request.getUri().getDecodedPath().equals(PATH_LOGIN)) {
          handleLoginPost(request, httpServletResponse, secured);
        } else if (request.getUri().getDecodedPath().equals(PATH_SIGNUP)) {
          handleSignupPost(request, httpServletResponse);
        } else if (request.getUri().getDecodedPath().equals(PATH_CHANGE_PASSWORD)) {
          handleChangePasswordPost(request, httpServletResponse);
        } else if (request.getUri().getDecodedPath().equals(PATH_UPDATE_FEED_LIST)) {
          handleUpdateFeedListPost(request, httpServletResponse);
        } else if (request.getUri().getDecodedPath().equals(PATH_ADD_FEED)) {
          handleAddFeedPost(request, httpServletResponse);
        } else if (request.getUri().getDecodedPath().equals(PATH_REMOVE_FEED)) {
          handleRemoveFeedPost(request, httpServletResponse);
        } else if (request.getUri().getDecodedPath().equals(PATH_CHANGE_SETTINGS)) {
          handleChangeSettingsPost(request, httpServletResponse);
        }
      }

      /*{ // for tests only;
          httpServletResponse.getWriter().println(String.format("<h1>Unable to process request %s</h1>",
                  request.getUri().getDecodedPath()));
          request.setHandled(true);
      }*/
    } catch (Exception e) {
      LOG.error("Error processing request", e);
      try {
        // redirectToError(e.toString(), request, httpServletResponse); //!!! redirectToError leads
        // to infinite loop, probably related to
        // the fact that we get 2 calls for a regular request when redirecting
        httpServletResponse
            .getWriter()
            .println(
                String.format(
                    "<h1>Unable to process request %s</h1>", // ttt1 generate some HTML
                    request.getUri().getDecodedPath()));
        request.setHandled(true);
      } catch (Exception e1) {
        LOG.error("Error redirecting", e1);
      }
    }
  }

  /**
   * Normally sets the path and a few attributes that the JSPs are likely to need. Also verifies the
   * login information. If necessary, just redirects to the login page.
   *
   * @param target
   * @param request
   * @param httpServletResponse
   * @param secured
   * @return true if the request is already handled so the .jsp shouldn't get called
   * @throws Exception
   */
  private boolean prepareForJspGet(
      String target, Request request, HttpServletResponse httpServletResponse, boolean secured)
      throws Exception {

    LoginInfo.SessionInfo sessionInfo = UserHelpers.getSessionInfo(request);

    LOG.info(
        String.format(
            "hndl - %s ; %s; %s ; %s",
            target,
            request.getPathInfo(),
            request.getMethod(),
            secured ? "secured" : "not secured"));

    String path = request.getUri().getDecodedPath();

    boolean redirectToLogin = path.equals(PATH_LOGOUT);
    LoginInfo loginInfo = null;
    if (sessionInfo.isNull()) {
      redirectToLogin = true;
      LOG.info("Null session info. Logging in again.");
    } else {
      loginInfo =
          loginInfoDb.get(
              sessionInfo.browserId,
              sessionInfo.sessionId); // ttt2 use a cache, to avoid going to DB
      if (loginInfo == null || loginInfo.expiresOn < System.currentTimeMillis()) {
        LOG.info("Session has expired. Logging in again. Info: " + loginInfo);
        redirectToLogin = true;
      }
    }

    if (!path.equals(PATH_LOGIN) && !path.equals(PATH_SIGNUP) && !path.equals(PATH_ERROR)) {

      if (redirectToLogin) {
        // ttt2 perhaps store URI, to return to it after login
        logOut(sessionInfo.browserId);
        addLoginParams(request, loginInfo);
        httpServletResponse.sendRedirect(PATH_LOGIN);
        return true;
      }

      User user = userDb.get(loginInfo.userId);
      if (user == null) {
        WebUtils.redirectToError("Unknown user", request, httpServletResponse);
        return true;
      }
      if (!user.active) {
        WebUtils.redirectToError("Account is not active", request, httpServletResponse);
        return true;
      }
      request.setAttribute(VAR_FEED_DB, feedDb);
      request.setAttribute(VAR_USER_DB, userDb);
      request.setAttribute(VAR_ARTICLE_DB, articleDb);
      request.setAttribute(VAR_READ_ARTICLES_COLL_DB, readArticlesCollDb);

      request.setAttribute(VAR_USER, user);
      request.setAttribute(VAR_LOGIN_INFO, loginInfo);

      MultiMap<String> params = new MultiMap<>();
      params.put(PARAM_PATH, path);
      request.setParameters(params);
    }

    if (path.equals(PATH_LOGIN)) {
      addLoginParams(request, loginInfo);
    }
    return false;
  }

  private void handleOpenArticle(
      Request request, HttpServletResponse httpServletResponse, String target) throws Exception {
    try {
      int k1 = target.indexOf('/', 1);
      int k2 = target.indexOf('/', k1 + 1);
      String feedId = target.substring(k1 + 1, k2);
      String strSeq = target.substring(k2 + 1);
      int seq = Integer.parseInt(strSeq);
      Article article = articleDb.get(feedId, seq);
      LoginInfo loginInfo = userHelpers.getLoginInfo(request);
      // ttt2 using the link from a non-authenticated browser causes a NPE; maybe do something
      // better, e.g. sign up
      ReadArticlesColl readArticlesColl = readArticlesCollDb.get(loginInfo.userId, feedId);
      if (readArticlesColl == null) {
        readArticlesColl = new ReadArticlesColl(loginInfo.userId, feedId);
      }
      if (!readArticlesColl.isRead(seq)) {
        readArticlesColl.markRead(seq, Config.getConfig().maxSizeForReadArticles);
        readArticlesCollDb.add(readArticlesColl);
      }
      String s =
          URIUtil.encodePath(article.url)
              .replace("%3F", "?")
              .replace("%23", "#"); // ttt2 see how to do this right
      httpServletResponse.sendRedirect(s);
    } catch (Exception e) {
      WebUtils.showResult(
          String.format("Failed to get article for path %s. %s", target, e),
          "/",
          request,
          httpServletResponse);
    }
  }

  private void handleSignupPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {
    String userId = request.getParameter(PARAM_USER_ID);
    String userName = request.getParameter(PARAM_USER_NAME);
    String email = request.getParameter(PARAM_EMAIL);
    String stringPassword = request.getParameter(PARAM_PASSWORD);
    String stringPasswordConfirm = request.getParameter(PARAM_PASSWORD_CONFIRM);

    if (!stringPassword.equals(stringPasswordConfirm)) {
      WebUtils.redirectToError(
          "Mismatch between password and password confirmation", request, httpServletResponse);
      return;
    }

    SecureRandom secureRandom = new SecureRandom();
    String salt = "" + secureRandom.nextLong();
    byte[] password = User.computeHashedPassword(stringPassword, salt);
    User user = userDb.get(userId);
    if (user != null) {
      WebUtils.redirectToError(
          "There already exists a user with the ID " + userId, request, httpServletResponse);
      return;
    }

    user =
        new User(
            userId,
            userName,
            password,
            salt,
            email,
            new ArrayList<String>(),
            Config.getConfig().activateAccountsAtCreation,
            false);
    // ttt2 add confirmation by email, captcha, ...
    List<String> fieldErrors = user.checkFields();
    if (!fieldErrors.isEmpty()) {
      StringBuilder bld =
          new StringBuilder("Invalid values when trying to create user with ID ")
              .append(userId)
              .append("<br/>");
      for (String s : fieldErrors) {
        bld.append(s).append("<br/>");
      }
      WebUtils.redirectToError(bld.toString(), request, httpServletResponse);
      return;
    }

    // ttt2 2 clients can add the same userId simultaneously
    userDb.add(user);

    httpServletResponse.sendRedirect("/");
  }

  private void handleChangePasswordPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {

    LoginInfo loginInfo = userHelpers.getLoginInfo(request);
    if (loginInfo == null) {
      WebUtils.redirectToError("Couldn't determine the current user", request, httpServletResponse);
      return;
    }

    String userId = loginInfo.userId;
    String stringCrtPassword = request.getParameter(PARAM_CURRENT_PASSWORD);
    String stringNewPassword = request.getParameter(PARAM_PASSWORD);
    String stringNewPasswordConfirm = request.getParameter(PARAM_PASSWORD_CONFIRM);

    if (!stringNewPassword.equals(stringNewPasswordConfirm)) {
      showResult(
          "Mismatch between password and password confirmation",
          PATH_SETTINGS,
          request,
          httpServletResponse);
      return;
    }

    User user =
        userDb.get(
            userId); // ttt1 crashes for wrong ID; 2013.07.20 - no longer have an idea what this is
                     // about
    if (user == null) {
      WebUtils.redirectToError("Couldn't find the current user", request, httpServletResponse);
      return;
    }

    if (!user.checkPassword(stringCrtPassword)) {
      showResult("Incorrect current password", PATH_SETTINGS, request, httpServletResponse);
      return;
    }

    SecureRandom secureRandom = new SecureRandom();
    String salt = "" + secureRandom.nextLong();
    byte[] password = User.computeHashedPassword(stringNewPassword, salt);
    user.salt = salt;
    user.password = password;

    // ttt3 2 clients can change the password simultaneously
    userDb.add(user);

    // httpServletResponse.sendRedirect(PATH_SETTINGS);
    showResult("Password changed", PATH_SETTINGS, request, httpServletResponse);
  }

  private void handleChangeSettingsPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {

    LoginInfo loginInfo = userHelpers.getLoginInfo(request);
    if (loginInfo == null) {
      WebUtils.redirectToError("Couldn't determine the current user", request, httpServletResponse);
      return;
    }

    String stringItemsPerPage = request.getParameter(PARAM_ITEMS_PER_PAGE);
    try {
      loginInfo.itemsPerPage = Integer.parseInt(stringItemsPerPage);
    } catch (Exception e) {
      showResult(
          "Error trying to set the items per page. Expected integer value but got "
              + stringItemsPerPage,
          PATH_SETTINGS,
          request,
          httpServletResponse);
      return;
    }
    loginInfo.style = request.getParameter(PARAM_STYLE);
    loginInfo.feedDateFormat =
        request.getParameter(PARAM_FEED_DATE_FORMAT); // ttt2 validate, better in JSP

    loginInfoDb.add(loginInfo);

    // httpServletResponse.sendRedirect(PATH_SETTINGS);
    showResult("Settings changed", "/", request, httpServletResponse);
  }

  private void handleUpdateFeedListPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {
    LOG.info("updating feed list"); // ttt2 implement
    httpServletResponse.sendRedirect(PATH_FEED_ADMIN);
  }

  private void handleAddFeedPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {
    LOG.info("adding feed");
    User user = userHelpers.getUser(request);

    try {
      if (user == null) {
        LOG.error("User not found");
        return;
      }

      String url = request.getParameter(PARAM_NEW_FEED_URL);
      // ttt1 add some validation; probably best try to actually get data, set the title, ...
      if (url == null || url.equals("")) {
        LOG.error("New feed not specified");
        // ttt1 show some error
        return;
      }

      MessageDigest digest = MessageDigest.getInstance("MD5");
      String feedId = PrintUtils.byteArrayAsUrlString(digest.digest(url.getBytes("UTF-8")));
      feedId = feedId.substring(0, Config.getConfig().feedIdSize);

      Feed feed = feedDb.get(feedId);
      if (feed == null) {
        feed = new Feed(feedId, url);
        feedDb.add(feed);
      }

      if (user.feedIds.contains(feedId)) {
        LOG.error(String.format("Trying to add existing feed %s to user %s", feedId, user));
      } else {
        user.feedIds.add(feedId);
        userDb.updateFeeds(user);
      }
    } finally {
      httpServletResponse.sendRedirect(PATH_FEED_ADMIN);
    }
  }

  private void handleRemoveFeedPost(Request request, HttpServletResponse httpServletResponse)
      throws Exception {
    LOG.info("removing feed");
    User user = userHelpers.getUser(request);

    try {
      if (user == null) {
        LOG.error("User not found");
        return;
      }

      String feedId = request.getParameter(PARAM_FEED_ID);

      LOG.info(String.format("Removing feed %s for user %s", feedId, user));

      // ttt1 add some validation; probably best try to actually get data, set the title, ...
      if (feedId == null || feedId.equals("")) {
        LOG.error("feed not specified");
        // ttt1 show some error
        return;
      }

      if (user.feedIds.remove(
          feedId)) { // ttt2 clean up the global feed table; that's probably better done if nobody
                     // accesses a feed for 3 months or so
        userDb.updateFeeds(user);
        LOG.info(String.format("Removed feed %s for user %s", feedId, user));
      } else {
        LOG.info(String.format("No feed found with ID %s for user %s", feedId, user));
      }
    } finally {
      httpServletResponse.sendRedirect(PATH_FEED_ADMIN);
    }
  }

  private void handleLoginPost(
      Request request, HttpServletResponse httpServletResponse, boolean secured) throws Exception {
    String userId = request.getParameter(PARAM_USER_ID);
    String password = request.getParameter(PARAM_PASSWORD);
    String rememberAccountStr = request.getParameter(PARAM_REMEMBER_ACCOUNT);
    boolean rememberAccount = Boolean.parseBoolean(rememberAccountStr);
    LoginInfo.SessionInfo sessionInfo = UserHelpers.getSessionInfo(request);

    logOut(sessionInfo.browserId);

    User user = userDb.get(userId);
    if (user == null) {
      WebUtils.redirectToError("User " + userId + " not found", request, httpServletResponse);
      return;
    }

    if (!user.checkPassword(password)) {
      WebUtils.redirectToError("Invalid password", request, httpServletResponse);
      return;
    }

    if (!user.active) {
      WebUtils.redirectToError(
          "Account for User " + userId + " needs to be activated", request, httpServletResponse);
      return;
    }

    LOG.info("Logged in user " + userId);

    sessionInfo.sessionId = null;
    if (sessionInfo.browserId == null) {
      sessionInfo.browserId = getRandomId();
    } else {
      for (LoginInfo loginInfo : loginInfoDb.getLoginsForBrowser(sessionInfo.browserId)) {
        if (userId.equals(loginInfo.userId)) {
          sessionInfo.sessionId = loginInfo.sessionId;
          break;
        }
      }
    }

    long expireOn = System.currentTimeMillis() + Config.getConfig().loginExpireInterval;
    if (sessionInfo.sessionId == null) {
      sessionInfo.sessionId = getRandomId();
      Config config = Config.getConfig();
      loginInfoDb.add(
          new LoginInfo(
              sessionInfo.browserId,
              sessionInfo.sessionId,
              userId,
              expireOn,
              rememberAccount,
              config.defaultStyle,
              config.defaultItemsPerPage,
              config.defaultFeedDateFormat));
      LOG.info(String.format("Logging in in a new session. User: %s", user));
    } else {
      loginInfoDb.updateExpireTime(sessionInfo.browserId, sessionInfo.sessionId, expireOn);
      LOG.info(String.format("Logging in in an existing session. User: %s", user));
    }

    WebUtils.saveCookies(
        httpServletResponse, secured, sessionInfo.browserId, sessionInfo.sessionId);

    httpServletResponse.sendRedirect("/");
  }

  private String getRandomId() {
    SecureRandom secureRandom = new SecureRandom();
    return "" + secureRandom.nextLong();
  }

  private void addLoginParams(Request request, LoginInfo loginInfo) {
    MultiMap<String> params = new MultiMap<>();
    if (loginInfo != null && loginInfo.rememberAccount) {
      params.put(PARAM_USER_ID, loginInfo.userId);
    }
    request.setParameters(params);
  }

  private void logOut(String browserId) throws Exception {
    // ttt2 the right way to do it is to go through all the sessions of the current browser, which
    // would require a new field and a new index;
    // not sure if it's worth it, but this would work: A logs in, forgets to log out, B delets the
    // cookies, logs in, A sees B is logged in, then B
    // restores the cookies and uses A's account
    if (browserId == null) {
      return;
    }

    List<LoginInfo> loginInfos = loginInfoDb.getLoginsForBrowser(browserId);
    long expireTarget = System.currentTimeMillis() - Utils.ONE_DAY;
    for (LoginInfo loginInfo : loginInfos) {
      if (loginInfo.expiresOn <= expireTarget) {
        LOG.info(String.format("LoginInfo %s is enough in the past", loginInfo));
      } else {
        LOG.info(String.format("Logging out: %s", loginInfo));
        loginInfoDb.updateExpireTime(browserId, loginInfo.sessionId, expireTarget);
      }
    }
  }

  public static class FeedInfo {
    public String feedId;
    public int maxSeq;

    public FeedInfo(String feedId, int maxSeq) {
      this.feedId = feedId;
      this.maxSeq = maxSeq;
    }
  }

  // !!! IDEA reports this as unused, but it is called from JSP
  public static FeedInfo getFeedInfo(String feedPath) {
    if (feedPath.startsWith(PATH_FEED + "/")) {
      try {
        if (feedPath.endsWith("/")) {
          feedPath = feedPath.substring(0, feedPath.length() - 1);
        }
        int k = PATH_FEED.length() + 1;
        int p = feedPath.indexOf('/', k);
        return p >= 0
            ? new FeedInfo(feedPath.substring(k, p), Integer.parseInt(feedPath.substring(p + 1)))
            : new FeedInfo(feedPath.substring(k), -1);
      } catch (Exception e) {
        LOG.error("Exception trying to parse the feed info", e);
      }
    }

    LOG.error("Invalid path from feed: " + feedPath);
    return new FeedInfo("INVALID", -1);
  }

  // !!! IDEA reports this as unused, but it is called from JSP
  public static String getStyle(LoginInfo loginInfo) {
    StringBuilder bld = new StringBuilder();
    bld.append("<style media=\"screen\" type=\"text/css\">\n\n");
    if (loginInfo == null) {
      bld.append(Config.getConfig().defaultStyle);
    } else {
      bld.append(loginInfo.style); // ttt3 detect broken styles and return default
    }
    bld.append("</style>\n");
    return bld.toString();
  }

  /*    private void jspCodeCheck() throws Exception {
      Article.DB articleDb;
      Request request;
      String path = "";

      String feedId = ReaderHandler.getFeedId(path);
      int maxSeq = ReaderHandler.getSeq(path);

      Feed.DB feedDb = (Feed.DB)request.getAttribute(ReaderHandler.VAR_FEED_DB);

      Feed feed = feedDb.get(feedId);
      if (feed == null) {
          out.println("Feed " + feedId + " not found");
      } else {
          if (maxSeq == -1) {
              maxSeq = feed.maxSeq;
          }
          if (maxSeq < 0) {
              out.println("Feed " + feedId + " is empty");
          } else {
              ++maxSeq;
              LoginInfo loginInfo = (LoginInfo)request.getAttribute(ReaderHandler.VAR_LOGIN_INFO);
              int minSeq = Math.max(maxSeq - loginInfo.itemsPerPage, 0);
              List<Article> articles = articleDb.get(feedId, minSeq, maxSeq);
              for (Article article : articles) {
                  out.println("<a href=\"" + article.url + "\">" + article.title + "</a><br/>");
              }
          }
      }

  }
  //*/
}
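
handleAddFeedPost() above derives a compact feed ID by hashing the feed URL with MD5, rendering the digest as a URL-safe string, and truncating it to a configured size. A standalone sketch of that derivation, assuming a URL-safe Base64 rendering in place of the original PrintUtils.byteArrayAsUrlString helper and an illustrative ID size of 10:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public class FeedIdSketch {
  // Hash the URL, encode the digest URL-safely, keep the first idSize characters.
  public static String feedIdFor(String url, int idSize) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    byte[] hash = digest.digest(url.getBytes(StandardCharsets.UTF_8));
    String encoded = Base64.getUrlEncoder().withoutPadding().encodeToString(hash);
    return encoded.substring(0, Math.min(idSize, encoded.length()));
  }

  public static void main(String[] args) throws Exception {
    // The same URL always yields the same ID, so duplicate feeds collapse to one entry.
    System.out.println(feedIdFor("http://example.com/feed.xml", 10));
  }
}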
public class IdentitySchema {

  private static final String IDENTITY_TABLE_PREFIX = "JBPM_ID_";

  Configuration configuration = null;
  Properties properties = null;
  Dialect dialect = null;
  Mapping mapping = null;
  String[] createSql = null;
  String[] dropSql = null;
  String[] cleanSql = null;

  ConnectionProvider connectionProvider = null;
  Connection connection = null;
  Statement statement = null;

  public IdentitySchema(Configuration configuration) {
    this.configuration = configuration;
    this.properties = configuration.getProperties();
    this.dialect = Dialect.getDialect(properties);
    try {
      // get the mapping field via reflection :-(
      Field mappingField = Configuration.class.getDeclaredField("mapping");
      mappingField.setAccessible(true);
      this.mapping = (Mapping) mappingField.get(configuration);
    } catch (Exception e) {
      throw new RuntimeException("couldn't get the hibernate mapping", e);
    }
  }

  // lazy initialization of the SQL scripts ////////////////////////////////////

  public String[] getCreateSql() {
    if (createSql == null) {
      createSql = configuration.generateSchemaCreationScript(dialect);
    }
    return createSql;
  }

  public String[] getDropSql() {
    if (dropSql == null) {
      dropSql = configuration.generateDropSchemaScript(dialect);
    }
    return dropSql;
  }

  public String[] getCleanSql() {
    if (cleanSql == null) {
      // loop over all foreign key constraints
      List dropForeignKeysSql = new ArrayList();
      List createForeignKeysSql = new ArrayList();
      Iterator iter = configuration.getTableMappings();
      while (iter.hasNext()) {
        Table table = (Table) iter.next();
        if (table.isPhysicalTable()) {
          Iterator subIter = table.getForeignKeyIterator();
          while (subIter.hasNext()) {
            ForeignKey fk = (ForeignKey) subIter.next();
            if (fk.isPhysicalConstraint()) {
              // collect the drop key constraint
              dropForeignKeysSql.add(
                  fk.sqlDropString(
                      dialect,
                      properties.getProperty(Environment.DEFAULT_CATALOG),
                      properties.getProperty(Environment.DEFAULT_SCHEMA)));
              createForeignKeysSql.add(
                  fk.sqlCreateString(
                      dialect,
                      mapping,
                      properties.getProperty(Environment.DEFAULT_CATALOG),
                      properties.getProperty(Environment.DEFAULT_SCHEMA)));
            }
          }
        }
      }

      List deleteSql = new ArrayList();
      iter = configuration.getTableMappings();
      while (iter.hasNext()) {
        Table table = (Table) iter.next();
        deleteSql.add("delete from " + table.getName());
      }

      List cleanSqlList = new ArrayList();
      cleanSqlList.addAll(dropForeignKeysSql);
      cleanSqlList.addAll(deleteSql);
      cleanSqlList.addAll(createForeignKeysSql);

      cleanSql = (String[]) cleanSqlList.toArray(new String[cleanSqlList.size()]);
    }
    return cleanSql;
  }

  // runtime table detection //////////////////////////////////////////////////

  public boolean hasIdentityTables() {
    return (getIdentityTables().size() > 0);
  }

  public List getIdentityTables() {
    // collect the names of the jBPM identity tables
    List jbpmTableNames = new ArrayList();
    try {
      createConnection();
      ResultSet resultSet = connection.getMetaData().getTables("", "", null, null);
      while (resultSet.next()) {
        String tableName = resultSet.getString("TABLE_NAME");
        if ((tableName != null)
            && (tableName.length() >= IDENTITY_TABLE_PREFIX.length())
            && (IDENTITY_TABLE_PREFIX.equalsIgnoreCase(
                tableName.substring(0, IDENTITY_TABLE_PREFIX.length())))) {
          jbpmTableNames.add(tableName);
        }
      }
    } catch (SQLException e) {
      throw new RuntimeException("couldn't get the jbpm table names");
    } finally {
      closeConnection();
    }
    return jbpmTableNames;
  }

  // script execution methods /////////////////////////////////////////////////

  public void dropSchema() {
    execute(getDropSql());
  }

  public void createSchema() {
    execute(getCreateSql());
  }

  public void cleanSchema() {
    execute(getCleanSql());
  }

  public void saveSqlScripts(String dir, String prefix) {
    try {
      new File(dir).mkdirs();
      saveSqlScript(dir + "/" + prefix + ".drop.sql", getDropSql());
      saveSqlScript(dir + "/" + prefix + ".create.sql", getCreateSql());
      saveSqlScript(dir + "/" + prefix + ".clean.sql", getCleanSql());
      new SchemaExport(configuration)
          .setDelimiter(getSqlDelimiter())
          .setOutputFile(dir + "/" + prefix + ".drop.create.sql")
          .create(true, false);
    } catch (Exception e) {
      throw new RuntimeException("couldn't generate scripts", e);
    }
  }

  // main /////////////////////////////////////////////////////////////////////

  public static void main(String[] args) {
    try {
      if ((args != null) && (args.length == 1) && ("create".equalsIgnoreCase(args[0]))) {
        new IdentitySchema(IdentitySessionFactory.createConfiguration()).createSchema();
      } else if ((args != null) && (args.length == 1) && ("drop".equalsIgnoreCase(args[0]))) {
        new IdentitySchema(IdentitySessionFactory.createConfiguration()).dropSchema();
      } else if ((args != null) && (args.length == 1) && ("clean".equalsIgnoreCase(args[0]))) {
        new IdentitySchema(IdentitySessionFactory.createConfiguration()).cleanSchema();
      } else if ((args != null) && (args.length == 3) && ("scripts".equalsIgnoreCase(args[0]))) {
        new IdentitySchema(IdentitySessionFactory.createConfiguration())
            .saveSqlScripts(args[1], args[2]);
      } else {
        System.err.println("syntax: JbpmSchema create");
        System.err.println("syntax: JbpmSchema drop");
        System.err.println("syntax: JbpmSchema clean");
        System.err.println("syntax: JbpmSchema scripts <dir> <prefix>");
      }
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException(e);
    }
  }

  private void saveSqlScript(String fileName, String[] sql) throws FileNotFoundException {
    PrintStream printStream = new PrintStream(new FileOutputStream(fileName));
    try {
      for (int i = 0; i < sql.length; i++) {
        printStream.println(sql[i] + getSqlDelimiter());
      }
    } finally {
      printStream.close();
    }
  }

  // sql script execution /////////////////////////////////////////////////////

  public void execute(String[] sqls) {
    String sql = null;
    String showSqlText = properties.getProperty("hibernate.show_sql");
    boolean showSql = ("true".equalsIgnoreCase(showSqlText));

    try {
      createConnection();
      statement = connection.createStatement();

      for (int i = 0; i < sqls.length; i++) {
        sql = sqls[i];
        String delimitedSql = sql + getSqlDelimiter();

        if (showSql) log.debug(delimitedSql);
        statement.executeUpdate(delimitedSql);
      }

    } catch (SQLException e) {
      e.printStackTrace();
      throw new RuntimeException("couldn't execute sql '" + sql + "'", e);
    } finally {
      closeConnection();
    }
  }

  private void closeConnection() {
    try {
      if (statement != null) statement.close();
      if (connection != null) {
        JDBCExceptionReporter.logWarnings(connection.getWarnings());
        connection.clearWarnings();
        connectionProvider.closeConnection(connection);
        connectionProvider.close();
      }
    } catch (Exception e) {
      System.err.println("Could not close connection");
      e.printStackTrace();
    }
  }

  private void createConnection() throws SQLException {
    connectionProvider = ConnectionProviderFactory.newConnectionProvider(properties);
    connection = connectionProvider.getConnection();
    if (!connection.getAutoCommit()) {
      connection.commit();
      connection.setAutoCommit(true);
    }
  }

  public Properties getProperties() {
    return properties;
  }

  // sql delimiter ////////////////////////////////////////////////////////////

  private static String sqlDelimiter = null;

  private synchronized String getSqlDelimiter() {
    if (sqlDelimiter == null) {
      sqlDelimiter = properties.getProperty("jbpm.sql.delimiter", ";");
    }
    return sqlDelimiter;
  }

  // logger ///////////////////////////////////////////////////////////////////

  private static final Log log = LogFactory.getLog(IdentitySchema.class);
}
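
IdentitySchema above is normally driven from its main() method, but it can also be used programmatically to inspect the generated DDL before touching the database. A small usage sketch, assuming IdentitySessionFactory.createConfiguration() is available exactly as main() uses it:

public class IdentitySchemaUsageSketch {
  public static void main(String[] args) {
    // Build the helper from the same Hibernate configuration main() uses.
    IdentitySchema schema = new IdentitySchema(IdentitySessionFactory.createConfiguration());

    // Print the create script instead of executing it.
    for (String statement : schema.getCreateSql()) {
      System.out.println(statement + ";");
    }

    // getCleanSql() orders its statements as: drop foreign keys, delete from every table,
    // recreate the foreign keys, so tables can be emptied without violating referential integrity.
    System.out.println(schema.getCleanSql().length + " statements in the clean script");
  }
}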
Example #17
0
/**
 * MDC Connector
 *
 * @author yjiang
 */
public abstract class MDCConnector extends IoHandlerAdapter {

  static final Log log = LogFactory.getLog(MDCConnector.class);

  /** the max size of a packet; mirrors MDCServer.MAX_SIZE */
  static int MAX_SIZE = MDCServer.MAX_SIZE;

  protected Selector selector;
  protected IoConnector connector;
  protected static Configuration _conf;

  protected static boolean inited = false;

  /** Close. */
  public void close() {
    if (selector != null) {
      selector.wakeup();
      try {
        selector.close();
      } catch (IOException e1) {
        log.warn("close selector fails", e1);
      } finally {
        selector = null;
      }
    }

    if (connector != null) {
      connector.dispose();
      connector = null;
    }
  }

  /** Instantiates a new MDC connector. */
  protected MDCConnector() {}

  /** Initializes the shared configuration, commands and RSA keys. */
  public static synchronized void init() {
    if (inited) {
      return;
    }

    _conf = Config.getConfig();

    /** initialize app command */
    Command.init();

    /** initialize the RSA key, hardcode 2048 bits */
    TConn.pub_key = SystemConfig.s("pub_key", null);
    if (TConn.pub_key == null) {
      Key k = RSA.generate(2048);
      TConn.pri_key = k.pri_key;
      TConn.pub_key = k.pub_key;

      /** set back in database */
      SystemConfig.setConfig("pri_key", TConn.pri_key);
      SystemConfig.setConfig("pub_key", TConn.pub_key);
    } else {

      /** get from the database */
      TConn.pri_key = SystemConfig.s("pri_key", null);
    }

    inited = true;
  }

  /**
   * Service.
   *
   * @param o the o
   * @param session the session
   */
  void service(IoBuffer o, IoSession session) {
    try {
      // System.out.println(o.remaining() + "/" + o.capacity());

      session.setAttribute("last", System.currentTimeMillis());

      SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf");
      if (in == null) {
        in = SimpleIoBuffer.create(4096);
        session.setAttribute("buf", in);
      }
      byte[] data = new byte[o.remaining()];
      o.get(data);
      in.append(data);

      // log.debug("recv: " + data.length + ", " +
      // session.getRemoteAddress());

      while (in.length() > 5) {
        in.mark();
        /**
         * Byte 1: head of the package<br>
         * bit 7-6: "01", indicator of MDC<br>
         * bit 5: encrypt indicator, "0": no; "1": encrypted<br>
         * bit 4: zip indicator, "0": no, "1": zipped<br>
         * bit 0-3: reserved<br>
         * Byte 2-5: length of data<br>
         * Byte[…]: data array<br>
         */
        byte head = in.read();
        /** test the head indicator, if not correct close it */
        if ((head & 0xC0) != 0x40) {
          log.info("flag is not correct! flag:" + head + ",from: " + session.getRemoteAddress());

          session.close(true);
          return;
        }

        int len = in.getInt();

        if (len <= 0 || len > MAX_SIZE) {
          log.error(
              "mdcconnector.Wrong lendth: "
                  + len
                  + "/"
                  + MAX_SIZE
                  + " - "
                  + session.getRemoteAddress());
          session.close(true);
          break;
        }

        if (in.length() < len) {
          in.reset();
          break;
        } else {
          // do it
          // log.info("stub.package.size: " + len);

          byte[] b = new byte[len];
          in.read(b);

          if (TConn.DEBUG) {
            log.debug("recv: " + Bean.toString(b));
          }

          /** test the zip flag */
          if ((head & 0x10) > 0) {
            b = Zip.unzip(b);
          }

          final TConn d = (TConn) session.getAttribute("conn");
          if (d != null) {
            /** test the encrypted flag */
            if ((head & 0x20) > 0) {
              b = DES.decode(b, d.deskey);
            }

            final byte[] bb = b;
            /** test if the packet is for mdc or app */
            new WorkerTask() {

              @Override
              public void onExecute() {
                d.process(bb);
              }
            }.schedule(0);

            session.setAttribute("last", System.currentTimeMillis());
          }
        }
      }
    } catch (Throwable e) {
      log.error("closing stub: " + session.getRemoteAddress(), e);
      session.close(true);
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionCreated(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionCreated(IoSession session) throws Exception {
    String remote = session.getRemoteAddress().toString();
    log.info("stub created:" + remote);

    /** check the allow ip */
    if (TConn.ALLOW_IP == null || "*".equals(TConn.ALLOW_IP) || remote.matches(TConn.ALLOW_IP)) {
      TConn d = new TConn(session);
      session.setAttribute("conn", d);
    } else {
      log.warn("deny the connection:" + remote + ", allow ip:" + TConn.ALLOW_IP);
      session.close(true);
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionClosed(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionClosed(IoSession session) throws Exception {
    log.debug("closed stub: " + session.getRemoteAddress());
    TConn d = (TConn) session.getAttribute("conn");
    if (d != null) {
      d.close();
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache.
   * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus)
   */
  public void sessionIdle(IoSession session, IdleStatus status) throws Exception {
    if (IdleStatus.BOTH_IDLE.equals(status)) {
      Long l = (Long) session.getAttribute("last");
      if (l != null && System.currentTimeMillis() - l > 60 * 1000) {
        session.close(true);
      }
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#messageReceived(org.apache
   * .mina.core.session.IoSession, java.lang.Object)
   */
  public void messageReceived(IoSession session, Object message) throws Exception {
    // System.out.println(message);
    if (message instanceof IoBuffer) {
      service((IoBuffer) message, session);
    }
  }

  private static MDCConnector tcpconnector;
  private static MDCConnector udpconnector;

  /**
   * Connect by tcp with the default timeout (X.AMINUTE).
   *
   * @param host the host
   * @param port the port
   * @return the t conn
   */
  public static synchronized TConn connectByTcp(String host, int port) {
    return connectByTcp(host, port, X.AMINUTE);
  }

  /**
   * Connect by tcp.
   *
   * @param host the host
   * @param port the port
   * @param timeout the connect timeout, in milliseconds
   * @return the t conn
   */
  public static synchronized TConn connectByTcp(String host, int port, long timeout) {

    TimeStamp t = TimeStamp.create();

    try {
      if (tcpconnector == null) {
        tcpconnector = new TDCConnector();
      }

      tcpconnector.connector.setConnectTimeoutMillis(timeout);

      ConnectFuture connFuture = tcpconnector.connector.connect(new InetSocketAddress(host, port));

      connFuture.awaitUninterruptibly(timeout);
      IoSession session = connFuture.getSession();

      TConn c = new TConn(session);

      session.setAttribute("conn", c);
      return c;
    } catch (Exception e) {
      log.error(
          "error, [" + host + ":" + port + "], cost: " + t.past() + "ms, timeout=" + timeout, e);
    }

    return null;
  }

  /**
   * Connect by udp.
   *
   * @param host the host
   * @param port the port
   * @return the t conn
   */
  public static synchronized TConn connectByUdp(String host, int port) {
    try {
      if (udpconnector == null) {
        udpconnector = new UDCConnector();
      }

      ConnectFuture connFuture = udpconnector.connector.connect(new InetSocketAddress(host, port));
      connFuture.awaitUninterruptibly();
      IoSession session = connFuture.getSession();

      TConn c = new TConn(session);

      session.setAttribute("conn", c);
      return c;
    } catch (Exception e) {
      log.error("[" + host + ":" + port + "]", e);
    }
    return null;
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#exceptionCaught(org.apache
   * .mina.core.session.IoSession, java.lang.Throwable)
   */
  @Override
  public void exceptionCaught(IoSession session, Throwable cause) throws Exception {
    log.error(cause.getMessage(), cause);
  }
}
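
service() above parses frames that start with a one-byte head (bits 7-6 = 01 marking MDC, bit 5 = encrypted, bit 4 = zipped) followed by a four-byte length and the payload. A minimal sketch of building such a frame with java.nio, independent of the IoBuffer/SimpleIoBuffer types used above; the flag values come from the bit layout documented in service(), everything else is illustrative:

import java.nio.ByteBuffer;

public class MdcFrameSketch {
  static final int FLAG_MDC = 0x40;       // bits 7-6 = 01
  static final int FLAG_ENCRYPTED = 0x20; // bit 5
  static final int FLAG_ZIPPED = 0x10;    // bit 4

  // Build [head byte][4-byte length][payload], the shape MDCConnector.service() expects.
  public static byte[] frame(byte[] payload, boolean encrypted, boolean zipped) {
    int head = FLAG_MDC;
    if (encrypted) head |= FLAG_ENCRYPTED;
    if (zipped) head |= FLAG_ZIPPED;

    ByteBuffer buf = ByteBuffer.allocate(1 + 4 + payload.length);
    buf.put((byte) head);
    buf.putInt(payload.length); // ByteBuffer writes big-endian by default
    buf.put(payload);
    return buf.array();
  }

  public static void main(String[] args) {
    byte[] f = frame("ping".getBytes(), false, false);
    System.out.printf("head=0x%02X, declared length=%d%n", f[0], f.length - 5);
  }
}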
Example #18
0
/** @author Johan Lindquist */
public class BaseController {
  private Log _log = LogFactory.getLog(BaseController.class);

  @Autowired protected JahSpotifyService _jahSpotifyService;

  @Value(value = "${jahspotify.web.controller.default-media-expires-duration}")
  private int _defaultMediaExpirationTime;

  protected void writeResponse(
      final HttpServletResponse httpServletResponse,
      final SimpleStatusResponse simpleStatusResponse) {
    Gson gson = new Gson();
    try {
      httpServletResponse.setContentType("application/json; charset=utf-8");
      _log.debug("Serializing: " + simpleStatusResponse);
      final PrintWriter writer = httpServletResponse.getWriter();
      gson.toJson(simpleStatusResponse.getResponseStatus(), writer);
      writer.flush();
      writer.close();
    } catch (Exception e) {
      _log.error("Error while writing response: " + e.getMessage(), e);
    }
  }

  protected void writeMediaNotReadable(final HttpServletResponse httpServletResponse) {
    SimpleStatusResponse simpleStatusResponse = new SimpleStatusResponse();
    simpleStatusResponse.setResponseStatus(ResponseStatus.RESOURCE_NOT_FOUND);
    writeResponse(httpServletResponse, simpleStatusResponse);
  }

  protected void writeResponseGeneric(
      final HttpServletResponse httpServletResponse, final Object object) {
    this.writeResponseGenericWithDate(httpServletResponse, null, object);
  }

  protected void writeResponseGenericWithDate(
      final HttpServletResponse httpServletResponse, final Date lastModified, final Object object) {
    writeResponseGenericWithDate(
        httpServletResponse, lastModified, _defaultMediaExpirationTime, object);
  }

  protected void writeResponseGenericWithDate(
      final HttpServletResponse httpServletResponse,
      final Date lastModified,
      final int expirationTime,
      final Object object) {
    Gson gson = new Gson();
    try {
      httpServletResponse.setContentType("application/json; charset=utf-8");
      if (lastModified != null) {
        httpServletResponse.addHeader("Expires", createDateHeader(expirationTime));
        httpServletResponse.addHeader("Last-Modified", toHttpDate(lastModified));
      }
      _log.debug("Serializing: " + object);
      final PrintWriter writer = httpServletResponse.getWriter();
      gson.toJson(object, writer);
      writer.flush();
      writer.close();
    } catch (Exception e) {
      _log.error("Error while writing response: " + e.getMessage(), e);
    }
  }

  protected String createDateHeader(int expires) {
    final Calendar utc = GregorianCalendar.getInstance(TimeZone.getTimeZone("UTC"));
    utc.add(Calendar.SECOND, expires);
    return toHttpDate(utc.getTime());
  }

  protected String toHttpDate(Date date) {
    // HTTP dates must be rendered in GMT with English day/month names
    SimpleDateFormat simpleDateFormat =
        new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss z", Locale.US);
    simpleDateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    return simpleDateFormat.format(date);
  }

  protected Link retrieveLink(final HttpServletRequest httpServletRequest) {
    String uri =
        httpServletRequest
            .getRequestURI()
            .substring(httpServletRequest.getRequestURI().lastIndexOf("/") + 1);
    _log.debug("Extracted URI: " + uri);
    return Link.create(uri);
  }

  protected <T> T readRequest(final HttpServletRequest httpServletRequest, final Class<T> classOfT)
      throws IOException {
    final BufferedReader br = new BufferedReader(httpServletRequest.getReader());
    Gson gson = new Gson();
    return gson.fromJson(br, classOfT);
  }

  public void writeErrorResponse(final HttpServletResponse httpServletResponse, final Exception e) {
    _log.error("Error while processing request: " + e.getMessage(), e);
    SimpleStatusResponse simpleStatusResponse = new SimpleStatusResponse();
    simpleStatusResponse.setResponseStatus(ResponseStatus.INTERNAL_ERROR);
    writeResponse(httpServletResponse, simpleStatusResponse);
  }
}
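
readRequest() and the writeResponse* methods above rely on Gson for both directions of the JSON mapping. A standalone round-trip sketch with a hypothetical payload class (QueueRequest is not part of the controller above):

import com.google.gson.Gson;

public class GsonRoundTripSketch {
  // Hypothetical payload; the real controller binds its own request/response classes.
  static class QueueRequest {
    String uri;
    int position;
  }

  public static void main(String[] args) {
    Gson gson = new Gson();

    // What readRequest() does: bind a JSON body to a typed object.
    QueueRequest req =
        gson.fromJson("{\"uri\":\"spotify:track:abc\",\"position\":2}", QueueRequest.class);

    // What writeResponseGeneric() does: serialize an object back out for the response writer.
    System.out.println(req.uri + " -> " + gson.toJson(req));
  }
}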
Example #19
0
/**
 * A simple RPC mechanism.
 *
 * <p>A <i>protocol</i> is a Java interface. All parameters and return types must be one of:
 *
 * <ul>
 *   <li>a primitive type, <code>boolean</code>, <code>byte</code>, <code>char</code>, <code>short
 *       </code>, <code>int</code>, <code>long</code>, <code>float</code>, <code>double</code>, or
 *       <code>void</code>; or
 *   <li>a {@link String}; or
 *   <li>a {@link Writable}; or
 *   <li>an array of the above types
 * </ul>
 *
 * All methods in the protocol should throw only IOException. No field data of the protocol instance
 * is transmitted.
 */
@InterfaceAudience.LimitedPrivate(value = {"Common", "HDFS", "MapReduce", "Yarn"})
@InterfaceStability.Evolving
public class RPC {
  static final int RPC_SERVICE_CLASS_DEFAULT = 0;

  public enum RpcKind {
    RPC_BUILTIN((short) 1), // Used for built in calls by tests
    RPC_WRITABLE((short) 2), // Use WritableRpcEngine
    RPC_PROTOCOL_BUFFER((short) 3); // Use ProtobufRpcEngine
    static final short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
    public final short value; // TODO make it private

    RpcKind(short val) {
      this.value = val;
    }
  }

  interface RpcInvoker {
    /**
     * Process a client call on the server side
     *
     * @param server the server within whose context this rpc call is made
     * @param protocol - the protocol name (the class of the client proxy used to make calls to the
     *     rpc server)
     * @param rpcRequest - deserialized
     * @param receiveTime time at which the call was received (for metrics)
     * @return the call's return
     * @throws IOException
     */
    public Writable call(Server server, String protocol, Writable rpcRequest, long receiveTime)
        throws Exception;
  }
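  // A minimal sketch of a protocol interface as constrained by the class javadoc above:
  // parameters and return types limited to primitives, String, Writable, or arrays of those,
  // and every method declares IOException. The interface name, method, and versionID value
  // below are illustrative assumptions, not part of the surrounding code.
  //
  //   public interface PingProtocol extends VersionedProtocol {
  //     long versionID = 1L; // read reflectively by RPC.getProtocolVersion()
  //     String ping(String message) throws IOException;
  //   }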

  static final Log LOG = LogFactory.getLog(RPC.class);

  /**
   * Get all superInterfaces that extend VersionedProtocol
   *
   * @param childInterfaces
   * @return the super interfaces that extend VersionedProtocol
   */
  static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
    List<Class<?>> allInterfaces = new ArrayList<Class<?>>();

    for (Class<?> childInterface : childInterfaces) {
      if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
        allInterfaces.add(childInterface);
        allInterfaces.addAll(Arrays.asList(getSuperInterfaces(childInterface.getInterfaces())));
      } else {
        LOG.warn(
            "Interface "
                + childInterface
                + " ignored because it does not extend VersionedProtocol");
      }
    }
    return allInterfaces.toArray(new Class[allInterfaces.size()]);
  }

  /**
   * Get all interfaces that the given protocol implements or extends which are assignable from
   * VersionedProtocol.
   */
  static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
    Class<?>[] interfaces = protocol.getInterfaces();
    return getSuperInterfaces(interfaces);
  }

  /**
   * Get the protocol name. If the protocol class has a ProtocolAnnotation, then get the protocol
   * name from the annotation; otherwise the class name is the protocol name.
   */
  public static String getProtocolName(Class<?> protocol) {
    if (protocol == null) {
      return null;
    }
    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
    return (anno == null) ? protocol.getName() : anno.protocolName();
  }

  /**
   * Get the protocol version from the protocol class. If the protocol class has a
   * ProtocolAnnotation, then get the protocol version from the annotation; otherwise read it from
   * the class's versionID field.
   */
  public static long getProtocolVersion(Class<?> protocol) {
    if (protocol == null) {
      throw new IllegalArgumentException("Null protocol");
    }
    long version;
    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
    if (anno != null) {
      version = anno.protocolVersion();
      if (version != -1) return version;
    }
    try {
      Field versionField = protocol.getField("versionID");
      versionField.setAccessible(true);
      return versionField.getLong(protocol);
    } catch (NoSuchFieldException ex) {
      throw new RuntimeException(ex);
    } catch (IllegalAccessException ex) {
      throw new RuntimeException(ex);
    }
  }

  private RPC() {} // no public ctor

  // cache of RpcEngines by protocol
  private static final Map<Class<?>, RpcEngine> PROTOCOL_ENGINES =
      new HashMap<Class<?>, RpcEngine>();

  private static final String ENGINE_PROP = "rpc.engine";

  /**
   * Set a protocol to use a non-default RpcEngine.
   *
   * @param conf configuration to use
   * @param protocol the protocol interface
   * @param engine the RpcEngine impl
   */
  public static void setProtocolEngine(Configuration conf, Class<?> protocol, Class<?> engine) {
    conf.setClass(ENGINE_PROP + "." + protocol.getName(), engine, RpcEngine.class);
  }

  // return the RpcEngine configured to handle a protocol
  static synchronized RpcEngine getProtocolEngine(Class<?> protocol, Configuration conf) {
    RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
    if (engine == null) {
      Class<?> impl =
          conf.getClass(ENGINE_PROP + "." + protocol.getName(), WritableRpcEngine.class);
      engine = (RpcEngine) ReflectionUtils.newInstance(impl, conf);
      PROTOCOL_ENGINES.put(protocol, engine);
    }
    return engine;
  }

  /** A version mismatch for the RPC protocol. */
  public static class VersionMismatch extends RpcServerException {
    private static final long serialVersionUID = 0;

    private String interfaceName;
    private long clientVersion;
    private long serverVersion;

    /**
     * Create a version mismatch exception
     *
     * @param interfaceName the name of the protocol mismatch
     * @param clientVersion the client's version of the protocol
     * @param serverVersion the server's version of the protocol
     */
    public VersionMismatch(String interfaceName, long clientVersion, long serverVersion) {
      super(
          "Protocol "
              + interfaceName
              + " version mismatch. (client = "
              + clientVersion
              + ", server = "
              + serverVersion
              + ")");
      this.interfaceName = interfaceName;
      this.clientVersion = clientVersion;
      this.serverVersion = serverVersion;
    }

    /**
     * Get the interface name
     *
     * @return the java class name (eg. org.apache.hadoop.mapred.InterTrackerProtocol)
     */
    public String getInterfaceName() {
      return interfaceName;
    }

    /** Get the client's preferred version. */
    public long getClientVersion() {
      return clientVersion;
    }

    /** Get the version the server agreed to. */
    public long getServerVersion() {
      return serverVersion;
    }

    /** Get the RPC status corresponding to this exception. */
    public RpcStatusProto getRpcStatusProto() {
      return RpcStatusProto.ERROR;
    }

    /** Get the detailed RPC status corresponding to this exception. */
    public RpcErrorCodeProto getRpcErrorCodeProto() {
      return RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH;
    }
  }

  /**
   * Get a proxy connection to a remote server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @return the proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> T waitForProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @return the protocol proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, Long.MAX_VALUE);
  }

  /**
   * Get a proxy connection to a remote server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param connTimeout time in milliseconds before giving up
   * @return the proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> T waitForProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long connTimeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, connTimeout).getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param connTimeout time in milliseconds before giving up
   * @return the protocol proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      long connTimeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
  }

  /**
   * Get a proxy connection to a remote server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param rpcTimeout timeout for each RPC
   * @param timeout time in milliseconds before giving up
   * @return the proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> T waitForProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      int rpcTimeout,
      long timeout)
      throws IOException {
    return waitForProtocolProxy(protocol, clientVersion, addr, conf, rpcTimeout, null, timeout)
        .getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param rpcTimeout timeout for each RPC
   * @param connectionRetryPolicy retry policy to use when establishing the connection (may be null)
   * @param timeout time in milliseconds before giving up
   * @return the protocol proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> ProtocolProxy<T> waitForProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      int rpcTimeout,
      RetryPolicy connectionRetryPolicy,
      long timeout)
      throws IOException {
    long startTime = Time.now();
    IOException ioe;
    while (true) {
      try {
        return getProtocolProxy(
            protocol,
            clientVersion,
            addr,
            UserGroupInformation.getCurrentUser(),
            conf,
            NetUtils.getDefaultSocketFactory(conf),
            rpcTimeout,
            connectionRetryPolicy);
      } catch (ConnectException se) { // namenode has not been started
        LOG.info("Server at " + addr + " not available yet, Zzzzz...");
        ioe = se;
      } catch (SocketTimeoutException te) { // namenode is busy
        LOG.info("Problem connecting to server: " + addr);
        ioe = te;
      } catch (NoRouteToHostException nrthe) { // perhaps a VIP is failing over
        LOG.info("No route to host for server: " + addr);
        ioe = nrthe;
      }
      // check whether the overall timeout has elapsed
      if (Time.now() - startTime >= timeout) {
        throw ioe;
      }

      // wait for retry
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // IGNORE
      }
    }
  }
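
  // Illustrative sketch (assumption): blocking until a server becomes reachable, with an overall
  // timeout. "MyProtocol" and the address are hypothetical; versionID is assumed to be a static
  // field on the protocol interface.
  //
  //   InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
  //   MyProtocol proxy =
  //       RPC.waitForProxy(MyProtocol.class, MyProtocol.versionID, addr, conf, 30 * 1000L);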

  /**
   * Construct a client-side proxy object that implements the named protocol, talking to a server at
   * the named address.
   *
   * @param <T> the protocol type
   */
  public static <T> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, conf, factory).getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
   * @param factory socket factory
   * @return the protocol proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
  }

  /**
   * Construct a client-side proxy object that implements the named protocol, talking to a server at
   * the named address.
   *
   * @param <T> the protocol type
   */
  public static <T> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory).getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param ticket user group information
   * @param conf configuration to use
   * @param factory socket factory
   * @return the protocol proxy
   * @throws IOException if the far end throws a RemoteException
   */
  public static <T> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, 0, null);
  }

  /**
   * Construct a client-side proxy that implements the named protocol, talking to a server at the
   * named address.
   *
   * @param <T> the protocol type
   * @param protocol protocol
   * @param clientVersion client's version
   * @param addr server address
   * @param ticket security ticket
   * @param conf configuration
   * @param factory socket factory
   * @param rpcTimeout max time for each rpc; 0 means no timeout
   * @return the proxy
   * @throws IOException if any error occurs
   */
  public static <T> T getProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory,
      int rpcTimeout)
      throws IOException {
    return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, null)
        .getProxy();
  }

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol protocol
   * @param clientVersion client's version
   * @param addr server address
   * @param ticket security ticket
   * @param conf configuration
   * @param factory socket factory
   * @param rpcTimeout max time for each rpc; 0 means no timeout
   * @param connectionRetryPolicy retry policy to use when establishing the connection (may be null)
   * @return the protocol proxy
   * @throws IOException if any error occurs
   */
  public static <T> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol,
      long clientVersion,
      InetSocketAddress addr,
      UserGroupInformation ticket,
      Configuration conf,
      SocketFactory factory,
      int rpcTimeout,
      RetryPolicy connectionRetryPolicy)
      throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      SaslRpcServer.init(conf);
    }
    return getProtocolEngine(protocol, conf)
        .getProxy(
            protocol,
            clientVersion,
            addr,
            ticket,
            conf,
            factory,
            rpcTimeout,
            connectionRetryPolicy);
  }

  /**
   * Construct a client-side proxy object with the default SocketFactory.
   *
   * @param <T> the protocol type
   * @param protocol the protocol interface
   * @param clientVersion the client's protocol version
   * @param addr the server address
   * @param conf the configuration to use
   * @return a proxy instance
   * @throws IOException if the proxy cannot be created
   */
  public static <T> T getProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {

    return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
  }
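
  // Illustrative sketch (assumption): obtaining a proxy with the default SocketFactory and
  // releasing it when done. "MyProtocol" and its echo() method are hypothetical.
  //
  //   MyProtocol proxy =
  //       RPC.getProxy(
  //           MyProtocol.class, MyProtocol.versionID, new InetSocketAddress("localhost", 8020), conf);
  //   try {
  //     proxy.echo("ping");
  //   } finally {
  //     RPC.stopProxy(proxy);
  //   }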

  /** Returns the server address for a given proxy. */
  public static InetSocketAddress getServerAddress(Object proxy) {
    return getConnectionIdForProxy(proxy).getAddress();
  }

  /**
   * Return the connection ID of the given object. If the provided object is in fact a protocol
   * translator, we'll get the connection ID of the underlying proxy object.
   *
   * @param proxy the proxy object to get the connection ID of.
   * @return the connection ID for the provided proxy object.
   */
  public static ConnectionId getConnectionIdForProxy(Object proxy) {
    if (proxy instanceof ProtocolTranslator) {
      proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject();
    }
    RpcInvocationHandler inv = (RpcInvocationHandler) Proxy.getInvocationHandler(proxy);
    return inv.getConnectionId();
  }
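
  // Illustrative sketch (assumption): inspecting an existing proxy. This works for plain proxies
  // and, via ProtocolTranslator, for wrapped ones.
  //
  //   InetSocketAddress serverAddr = RPC.getServerAddress(proxy);
  //   ConnectionId connId = RPC.getConnectionIdForProxy(proxy);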

  /**
   * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
   * that are supported by the server
   *
   * @param protocol the protocol interface
   * @param clientVersion the client's protocol version
   * @param addr the server address
   * @param conf the configuration to use
   * @return a protocol proxy
   * @throws IOException if the proxy cannot be created
   */
  public static <T> ProtocolProxy<T> getProtocolProxy(
      Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
      throws IOException {

    return getProtocolProxy(
        protocol, clientVersion, addr, conf, NetUtils.getDefaultSocketFactory(conf));
  }

  /**
   * Stop the proxy. The proxy must either implement {@link Closeable} or have an associated
   * {@link RpcInvocationHandler}.
   *
   * @param proxy the RPC proxy object to be stopped
   * @throws HadoopIllegalArgumentException if the proxy does not implement the {@link Closeable}
   *     interface and does not have a closeable {@link InvocationHandler}
   */
  public static void stopProxy(Object proxy) {
    if (proxy == null) {
      throw new HadoopIllegalArgumentException("Cannot close proxy since it is null");
    }
    try {
      if (proxy instanceof Closeable) {
        ((Closeable) proxy).close();
        return;
      } else {
        InvocationHandler handler = Proxy.getInvocationHandler(proxy);
        if (handler instanceof Closeable) {
          ((Closeable) handler).close();
          return;
        }
      }
    } catch (IOException e) {
      LOG.error("Closing proxy or invocation handler caused exception", e);
    } catch (IllegalArgumentException e) {
      LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
    }

    // If you see this error on a mock object in a unit test you're
    // developing, make sure to use MockitoUtil.mockProtocol() to
    // create your mock.
    throw new HadoopIllegalArgumentException(
        "Cannot close proxy - is not Closeable or "
            + "does not provide closeable invocation handler "
            + proxy.getClass());
  }

  /** Class to construct instances of RPC server with specific options. */
  public static class Builder {
    private Class<?> protocol = null;
    private Object instance = null;
    private String bindAddress = "0.0.0.0";
    private int port = 0;
    private int numHandlers = 1;
    private int numReaders = -1;
    private int queueSizePerHandler = -1;
    private boolean verbose = false;
    private final Configuration conf;
    private SecretManager<? extends TokenIdentifier> secretManager = null;
    private String portRangeConfig = null;

    public Builder(Configuration conf) {
      this.conf = conf;
    }

    /** Mandatory field */
    public Builder setProtocol(Class<?> protocol) {
      this.protocol = protocol;
      return this;
    }

    /** Mandatory field */
    public Builder setInstance(Object instance) {
      this.instance = instance;
      return this;
    }

    /** Default: 0.0.0.0 */
    public Builder setBindAddress(String bindAddress) {
      this.bindAddress = bindAddress;
      return this;
    }

    /** Default: 0 */
    public Builder setPort(int port) {
      this.port = port;
      return this;
    }

    /** Default: 1 */
    public Builder setNumHandlers(int numHandlers) {
      this.numHandlers = numHandlers;
      return this;
    }

    /** Default: -1 */
    public Builder setnumReaders(int numReaders) {
      this.numReaders = numReaders;
      return this;
    }

    /** Default: -1 */
    public Builder setQueueSizePerHandler(int queueSizePerHandler) {
      this.queueSizePerHandler = queueSizePerHandler;
      return this;
    }

    /** Default: false */
    public Builder setVerbose(boolean verbose) {
      this.verbose = verbose;
      return this;
    }

    /** Default: null */
    public Builder setSecretManager(SecretManager<? extends TokenIdentifier> secretManager) {
      this.secretManager = secretManager;
      return this;
    }

    /** Default: null */
    public Builder setPortRangeConfig(String portRangeConfig) {
      this.portRangeConfig = portRangeConfig;
      return this;
    }

    /**
     * Build the RPC Server.
     *
     * @throws IOException on error
     * @throws HadoopIllegalArgumentException when mandatory fields are not set
     */
    public Server build() throws IOException, HadoopIllegalArgumentException {
      if (this.conf == null) {
        throw new HadoopIllegalArgumentException("conf is not set");
      }
      if (this.protocol == null) {
        throw new HadoopIllegalArgumentException("protocol is not set");
      }
      if (this.instance == null) {
        throw new HadoopIllegalArgumentException("instance is not set");
      }

      return getProtocolEngine(this.protocol, this.conf)
          .getServer(
              this.protocol,
              this.instance,
              this.bindAddress,
              this.port,
              this.numHandlers,
              this.numReaders,
              this.queueSizePerHandler,
              this.verbose,
              this.conf,
              this.secretManager,
              this.portRangeConfig);
    }
  }
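
  // Illustrative sketch (assumption): building and starting an RPC server with the Builder above.
  // "MyProtocol" and "myProtocolImpl" are hypothetical; port 0 asks for any free port, and
  // start() is assumed to be inherited from org.apache.hadoop.ipc.Server.
  //
  //   Server server =
  //       new RPC.Builder(conf)
  //           .setProtocol(MyProtocol.class)
  //           .setInstance(myProtocolImpl)
  //           .setBindAddress("0.0.0.0")
  //           .setPort(0)
  //           .setNumHandlers(5)
  //           .build();
  //   server.start();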

  /** An RPC Server. */
  public abstract static class Server extends org.apache.hadoop.ipc.Server {
    boolean verbose;

    static String classNameBase(String className) {
      String[] names = className.split("\\.", -1);
      if (names == null || names.length == 0) {
        return className;
      }
      return names[names.length - 1];
    }

    /**
     * The server keeps a map from (protocol name, version) to the protocol implementation; this
     * class is the key in that map.
     */
    static class ProtoNameVer {
      final String protocol;
      final long version;

      ProtoNameVer(String protocol, long ver) {
        this.protocol = protocol;
        this.version = ver;
      }

      @Override
      public boolean equals(Object o) {
        if (o == null) return false;
        if (this == o) return true;
        if (!(o instanceof ProtoNameVer)) return false;
        ProtoNameVer pv = (ProtoNameVer) o;
        return ((pv.protocol.equals(this.protocol)) && (pv.version == this.version));
      }

      @Override
      public int hashCode() {
        return protocol.hashCode() * 37 + (int) version;
      }
    }

    /** The value in the map. */
    static class ProtoClassProtoImpl {
      final Class<?> protocolClass;
      final Object protocolImpl;

      ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
        this.protocolClass = protocolClass;
        this.protocolImpl = protocolImpl;
      }
    }

    ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray =
        new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);

    Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RPC.RpcKind rpcKind) {
      if (protocolImplMapArray.size() == 0) { // initialize for all rpc kinds
        for (int i = 0; i <= RpcKind.MAX_INDEX; ++i) {
          protocolImplMapArray.add(new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10));
        }
      }
      return protocolImplMapArray.get(rpcKind.ordinal());
    }

    // Register a protocol and its implementation for RPC calls
    void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) {
      String protocolName = RPC.getProtocolName(protocolClass);
      long version;

      try {
        version = RPC.getProtocolVersion(protocolClass);
      } catch (Exception ex) {
        LOG.warn("Protocol " + protocolClass + " NOT registered as cannot get protocol version ");
        return;
      }

      getProtocolImplMap(rpcKind)
          .put(
              new ProtoNameVer(protocolName, version),
              new ProtoClassProtoImpl(protocolClass, protocolImpl));
      LOG.debug(
          "RpcKind = "
              + rpcKind
              + " Protocol Name = "
              + protocolName
              + " version="
              + version
              + " ProtocolImpl="
              + protocolImpl.getClass().getName()
              + " protocolClass="
              + protocolClass.getName());
    }

    static class VerProtocolImpl {
      final long version;
      final ProtoClassProtoImpl protocolTarget;

      VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
        this.version = ver;
        this.protocolTarget = protocolTarget;
      }
    }

    VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind, String protocolName) {
      VerProtocolImpl[] resultk = new VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
      int i = 0;
      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
          getProtocolImplMap(rpcKind).entrySet()) {
        if (pv.getKey().protocol.equals(protocolName)) {
          resultk[i++] = new VerProtocolImpl(pv.getKey().version, pv.getValue());
        }
      }
      if (i == 0) {
        return null;
      }
      VerProtocolImpl[] result = new VerProtocolImpl[i];
      System.arraycopy(resultk, 0, result, 0, i);
      return result;
    }

    VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind, String protocolName) {
      Long highestVersion = 0L;
      ProtoClassProtoImpl highest = null;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Size of protoMap for " + rpcKind + " =" + getProtocolImplMap(rpcKind).size());
      }
      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
          getProtocolImplMap(rpcKind).entrySet()) {
        if (pv.getKey().protocol.equals(protocolName)) {
          if ((highest == null) || (pv.getKey().version > highestVersion)) {
            highest = pv.getValue();
            highestVersion = pv.getKey().version;
          }
        }
      }
      if (highest == null) {
        return null;
      }
      return new VerProtocolImpl(highestVersion, highest);
    }

    protected Server(
        String bindAddress,
        int port,
        Class<? extends Writable> paramClass,
        int handlerCount,
        int numReaders,
        int queueSizePerHandler,
        Configuration conf,
        String serverName,
        SecretManager<? extends TokenIdentifier> secretManager,
        String portRangeConfig)
        throws IOException {
      super(
          bindAddress,
          port,
          paramClass,
          handlerCount,
          numReaders,
          queueSizePerHandler,
          conf,
          serverName,
          secretManager,
          portRangeConfig);
      initProtocolMetaInfo(conf);
    }

    private void initProtocolMetaInfo(Configuration conf) {
      RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, ProtobufRpcEngine.class);
      ProtocolMetaInfoServerSideTranslatorPB xlator =
          new ProtocolMetaInfoServerSideTranslatorPB(this);
      BlockingService protocolInfoBlockingService =
          ProtocolInfoService.newReflectiveBlockingService(xlator);
      addProtocol(
          RpcKind.RPC_PROTOCOL_BUFFER, ProtocolMetaInfoPB.class, protocolInfoBlockingService);
    }

    /**
     * Add a protocol to the existing server.
     *
     * @param rpcKind - the RPC kind under which the protocol is served
     * @param protocolClass - the protocol class
     * @param protocolImpl - the impl of the protocol that will be called
     * @return the server (for convenience)
     */
    public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) {
      registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
      return this;
    }
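
    // Illustrative sketch (assumption): exposing a second protocol on an already-built server.
    // "MyOtherProtocolPB" and its generated blocking service are hypothetical names.
    //
    //   server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, MyOtherProtocolPB.class,
    //       MyOtherProtocolService.newReflectiveBlockingService(translator));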

    @Override
    public Writable call(
        RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime)
        throws Exception {
      return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime);
    }
  }
}
public class MilterRequestHandler implements RequestHandler, JilterHandler, StopBlockTarget {

  protected static Log logger = LogFactory.getLog(MilterRequestHandler.class.getName());
  protected SocketChannel socket = null;
  protected ArrayList<String> rcpts = null;
  protected FetchMessageCallback callback;
  protected String host = "";
  protected JilterStatus status = null;
  protected ByteArrayOutputStream bos = new ByteArrayOutputStream();
  protected static Pattern headerPattern1 = Pattern.compile("^cc|^to|^bcc");
  protected static Pattern headerPattern2 = Pattern.compile(".*<([-.+_\\d\\w]*@[-.+_\\d\\w]*)>");
  protected static Pattern headerPattern3 = Pattern.compile("([-.+_\\d\\w]*@[-.+_\\d\\w]*)");
  private static final int IDLE_TIMEOUT = 300000; // 5 minutes
  protected boolean includeBCC = false;

  public void handleRequest(SocketChannel socket, FetchMessageCallback callback) {
    this.socket = socket;
    this.callback = callback;
    includeBCC = false;
    rcpts = new ArrayList<String>();
    bos = new ByteArrayOutputStream();

    InetAddress address = socket.socket().getInetAddress();
    boolean isAllowed = Config.getConfig().getAgent().isAllowed(address);
    if (!isAllowed) {
      logger.debug(
          "attempted milter connection from disallowed address. force disconnect {address='"
              + address.getHostAddress()
              + "'}");
      try {
        socket.close();
      } catch (IOException io) {
        logger.error("failed to close milter socket.", io);
      }
      return;
    }

    ByteBuffer dataBuffer = ByteBuffer.allocateDirect(4096);
    JilterProcessor processor = new JilterProcessor(this);
    try {
      while (processor.process(socket, (ByteBuffer) dataBuffer.flip())) {
        dataBuffer.compact();
        if (this.socket.read(dataBuffer) == -1) {
          logger.debug("socket reports EOF, exiting read loop");
          break;
        }
      }
    } catch (IOException e) {
      logger.debug("Unexpected exception, connection will be closed", e);
    } finally {
      logger.debug("closing processor");
      processor.close();
      logger.debug("processor closed");
      try {
        logger.debug("closing socket");
        this.socket.close();
        logger.debug("socket closed");
      } catch (IOException e) {
        logger.debug("Unexpected exception", e);
      }
    }
  }

  public JilterStatus abort() {
    logger.debug("abort");
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus body(ByteBuffer bodyp) {
    logger.debug("jilter body()");
    long maxMessageSizeMB = Config.getConfig().getArchiver().getMaxMessageSize();
    long maxMessageSizeBytes = maxMessageSizeMB * 1024 * 1024;
    if (bodyp.array().length > maxMessageSizeBytes) {
      logger.warn(
          "milter maximum message size exceeded { size='" + bodyp.array().length + " bytes'}");
      return JilterStatus.SMFIS_REJECT;
    }
    try {
      bos.write("\n".getBytes());
      bos.write(bodyp.array());
    } catch (IOException io) {
      logger.error("jilter failed to write milter body data to byte buffer", io);
    }
    logger.debug("jilter body written");
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus close() {
    logger.debug("jilter close()");
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus connect(String hostname, InetAddress hostaddr, Properties properties) {
    rcpts = new ArrayList<String>();
    if (hostaddr != null) {
      host = hostaddr.toString();
    } else if (hostname != null) {
      host = hostname;
    } else {
      host = "localhost";
    }
    logger.debug("jilter connect() {from='" + hostname + "',host='" + host + "'}");
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus envfrom(String[] argv, Properties properties) {
    for (int i = 0; i < argv.length; i++) {
      logger.debug("jilter envfrom() {from='" + argv[i] + "'}");
    }
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus envrcpt(String[] argv, Properties properties) {
    for (int i = 0; i < argv.length; i++) {
      String strRecipient = argv[i];
      boolean orcptFlag = strRecipient.toLowerCase(Locale.ENGLISH).trim().contains("orcpt=");
      if (!orcptFlag) {
        logger.debug("jilter envrcpt() {to='" + strRecipient + "'}");
        String recipient =
            strRecipient.toLowerCase(Locale.ENGLISH).trim().replaceAll("<", "").replaceAll(">", "");
        rcpts.add(recipient);

        logger.debug("jilter add recipient {recipient='" + recipient + "'}");
      }
    }
    return JilterStatus.SMFIS_CONTINUE;
  }

  protected boolean shouldIgnoreBCCAddress(String address) {
    MilterServerService milterService = Config.getConfig().getMilterServerService();
    List<String> ignoreAddresses = milterService.getIgnoreBCCAddress();
    Matcher m = headerPattern2.matcher(address.toLowerCase(Locale.ENGLISH).trim());
    if (m.matches()) {
      String mailAddress = m.group(1);
      for (String ignoreAddress : ignoreAddresses) {
        if (ignoreAddress.equalsIgnoreCase(mailAddress)) return true;
      }
    } else {
      m = headerPattern3.matcher(address.toLowerCase(Locale.ENGLISH).trim());
      if (m.matches()) {
        String mailAddress = m.group(1);
        for (String ignoreAddress : ignoreAddresses) {
          if (ignoreAddress.equalsIgnoreCase(mailAddress)) return true;
        }
      }
    }
    return false;
  }

  public JilterStatus eoh() {
    logger.debug("jilter eoh()");
    // includeBCC is false if RCPT TO did not contain at least one address that also appears in
    // the TO, FROM or CC headers. This is a safety check: sometimes RCPT TO is something entirely
    // different and does not contain the actual recipients of the email.

    MilterServerService milterService = Config.getConfig().getMilterServerService();

    if (milterService.getIncludeBCC() && includeBCC) {
      logger.debug("including BCC addresses");
      // check to see if address is flagged to ignore
      if (rcpts.size() > 0) {
        Iterator<String> i = rcpts.iterator();
        while (i.hasNext()) {
          String rcpt = i.next();
          if (shouldIgnoreBCCAddress(rcpt)) {
            logger.debug("ignore include bcc address {address='" + rcpt + "'}");
            i.remove();
          }
        }
      }

      if (rcpts.size() > 0) {
        try {
          for (int j = 0; j < rcpts.size(); j++) {
            if (j == 0) {
              bos.write("bcc: ".getBytes());
            } else {
              bos.write(",".getBytes());
            }
            bos.write(rcpts.get(j).getBytes());
          }
          bos.write("\n".getBytes());
        } catch (IOException io) {
          logger.error("jilter failed to write end of header data", io);
        }
      }
    }
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus eom(JilterEOMActions eomActions, Properties properties) {
    logger.debug("jilter eom()");
    try {
      bos.close(); // close stream
    } catch (IOException io) {
      logger.error("jilter failed to close io stream during eom", io);
    }
    byte[] messageBytes = bos.toByteArray();
    bos = new ByteArrayOutputStream();
    ByteArrayInputStream bis = new ByteArrayInputStream(messageBytes);
    try {
      logger.debug("jilter store callback execute");
      Config.getStopBlockFactory()
          .detectBlock("milter server", Thread.currentThread(), this, IDLE_TIMEOUT);
      callback.store(bis, host);
      logger.debug("jilter store callback finished");
    } catch (ArchiveException e) {
      logger.error("failed to store the message via milter", e);
      if (e.getRecoveryDirective() == ArchiveException.RecoveryDirective.REJECT) {
        logger.debug("jilter reject");
        return JilterStatus.SMFIS_REJECT;
      } else if (e.getRecoveryDirective() == ArchiveException.RecoveryDirective.RETRYLATER) {
        logger.debug("jilter temp fail");
        return JilterStatus.SMFIS_TEMPFAIL;
      }
    } catch (Throwable oome) {
      logger.error("failed to store message:" + oome.getMessage(), oome);
      return JilterStatus.SMFIS_REJECT;
    } finally {
      Config.getStopBlockFactory().endDetectBlock(Thread.currentThread());
    }
    return JilterStatus.SMFIS_CONTINUE;
  }

  public int getRequiredModifications() {
    logger.debug("jilter requiredmodifications()");
    return SMFIF_NONE;
  }

  public int getSupportedProcesses() {
    logger.debug("jilter getsupportedprocesses()");
    return PROCESS_CONNECT | PROCESS_ENVRCPT | PROCESS_HEADER | PROCESS_BODY;
  }

  public JilterStatus header(String headerf, String headerv) {

    logger.debug("jilter header {name='" + headerf + "',value='" + headerv + "'}");
    StringBuffer header = new StringBuffer();
    header.append(headerf);
    header.append(": ");
    header.append(headerv);
    header.append("\n");
    try {
      bos.write(header.toString().getBytes());
    } catch (IOException io) {
      logger.error("jilter failed to write header field", io);
    }
    Matcher m = headerPattern1.matcher(headerf.toLowerCase(Locale.ENGLISH).trim());
    if (m.matches()) {
      logger.debug("jilter found to/bcc/cc header");
      String[] addresses = headerv.split(",");
      for (int i = 0; i < addresses.length; i++) {
        includeBCC = includeBCC | rcpts.remove(addresses[i].toLowerCase(Locale.ENGLISH).trim());
        logger.debug("jilter del recipient {recipient='" + addresses[i] + "'}");
        m = headerPattern2.matcher(addresses[i].toLowerCase(Locale.ENGLISH).trim());
        if (m.matches()) {
          String mailAddress = m.group(1);
          includeBCC = includeBCC | rcpts.remove(mailAddress);
          logger.debug("jilter del recipient {recipient='" + mailAddress + "'}");
        } else {
          m = headerPattern3.matcher(addresses[i].toLowerCase(Locale.ENGLISH).trim());
          if (m.matches()) {
            String mailAddress = m.group(1);
            includeBCC = includeBCC | rcpts.remove(mailAddress);
            logger.debug("jilter del recipient {recipient='" + mailAddress + "'}");
          }
        }
      }
    }
    return JilterStatus.SMFIS_CONTINUE;
  }

  public JilterStatus helo(String helohost, Properties properties) {
    logger.debug("jilter helo() " + helohost);
    return JilterStatus.SMFIS_CONTINUE;
  }

  public void handleBlock(Thread thread) {

    try {
      if (socket != null) {
        logger.debug("close socket()");
        socket.close();
      }
    } catch (Exception e) {
      // ignored
    }
    synchronized (this) {
      if (thread != null) {
        logger.debug("interrupt thread()");
        thread.interrupt();
      }
    }
  }
}