/**
   * Setting the internal field {@link #document} directly (bypassing {@link #setDocument(URL)})
   * delays loading the document until {@link #ready()}.
   */
  @Override
  protected void initialize(final String[] as) {
    if ("Linux".equals(System.getProperty("os.name")))
      getContext().getResourceManager().setPlatform("linux");

    final Class<?> mc = this.getClass();
    {
      final ResourceMap r = Application.getInstance().getContext().getResourceMap();
      initialScene =
          mc.getResource("/" + r.getResourcesDir() + r.getString("Application.defaultDocument"));
      templateScene =
          mc.getResource("/" + r.getResourcesDir() + r.getString("Application.templateDocument"));
    }

    // schedule the document to load in #ready()
    document = initialScene;
    for (final String p : as) {
      // ignore javaws parameters
      if ("-open".equals(p) || "-print".equals(p)) continue;
      try {
        document = new URL(p);
        break;
      } catch (final MalformedURLException e) {
        final File f = new File(p);
        if (f.canRead())
          try {
            document = f.toURI().toURL();
            break;
          } catch (final MalformedURLException e2) {
            log.warn("Cannot load '" + p + "'.", e2);
          }
        else log.warn("Cannot load '" + p + "'.", e);
      }
    }
  }
  public void afterPropertiesSet() throws Exception {
    // Check the signing and verification keys match
    if (signer instanceof RsaSigner) {
      RsaVerifier verifier;
      try {
        verifier = new RsaVerifier(verifierKey);
      } catch (Exception e) {
        logger.warn("Unable to create an RSA verifier from verifierKey");
        return;
      }

      byte[] test = "test".getBytes();
      try {
        verifier.verify(test, signer.sign(test));
        logger.info("Signing and verification RSA keys match");
      } catch (InvalidSignatureException e) {
        logger.error("Signing and verification RSA keys do not match");
      }
    } else {
      // Avoid a race condition where setters are called in the wrong order. Use of == is
      // intentional.
      Assert.state(
          this.signingKey == this.verifierKey,
          "For MAC signing you do not need to specify the verifier key separately, and if you do it must match the signing key");
    }
    SignatureVerifier verifier = new MacSigner(verifierKey);
    try {
      verifier = new RsaVerifier(verifierKey);
    } catch (Exception e) {
      logger.warn("Unable to create an RSA verifier from verifierKey");
    }
    this.verifier = verifier;
  }
Example #3
 private void setScriptsNames(ToolBoxDTO toolBoxDTO, String barDir)
     throws BAMToolboxDeploymentException {
   String analyticsDir = barDir + File.separator + BAMToolBoxDeployerConstants.SCRIPTS_DIR;
   if (new File(analyticsDir).exists()) {
     ArrayList<String> scriptNames = getFilesInDirectory(analyticsDir);
     // drop the analyzer properties file from the script list, if present
     for (int i = 0; i < scriptNames.size(); i++) {
       if (scriptNames
           .get(i)
           .equalsIgnoreCase(BAMToolBoxDeployerConstants.ANALYZERS_PROPERTIES_FILE)) {
         scriptNames.remove(i);
         break;
       }
     }
     if (scriptNames.size() == 0) {
       toolBoxDTO.setScriptsParentDirectory(null);
       log.warn("No scripts available in the specified directory");
     } else {
       toolBoxDTO.setScriptsParentDirectory(analyticsDir);
       toolBoxDTO.setScriptNames(scriptNames);
       setCronForAnalyticScripts(toolBoxDTO, analyticsDir);
     }
   } else {
     log.warn("No Analytics found for toolbox: " + toolBoxDTO.getName());
   }
 }
Example #4
  public ModelAndView handleRequest(HttpServletRequest request, HttpServletResponse response)
      throws Exception {
    String username = request.getParameter("username");
    String password = request.getParameter("password");
    Customer customer = null;

    if (username == null || username.length() <= 0 || password == null || password.length() <= 0) {
      log.warn("Empty username and/or password used for login");
      return ViewUtil.createErrorView("error.empty.username.or.password");
    }

    // login function is handled by the appropriate access mode handler
    customer = accessModeController.login(username, password);

    if (customer == null) {
      log.warn("Invalid login attempt with username = " + username);
      return ViewUtil.createErrorView("error.invalid.username.or.password");
    }

    UserSession userSession = new UserSession(customer);
    request.getSession().setAttribute("userSession", userSession);
    String forwardAction = request.getParameter("forwardAction");

    if (forwardAction != null) {
      log.info("Forwarding response to original request url: " + forwardAction);
      response.sendRedirect(forwardAction);
      return null;
    } else {
      response.sendRedirect("overview.htm");
      return null;
    }
  }
  /** Main dispatch method called from the LatherBoss. */
  public LatherValue dispatch(LatherContext ctx, String method, LatherValue arg)
      throws LatherRemoteException {

    Integer agentId = null;
    if (!haService.alertTriggersHaveInitialized()) {
      if (log.isDebugEnabled()) {
        log.debug("Not ready - received request for " + method + " from " + ctx.getCallerIP());
      }
      throw new LatherRemoteException("Server still initializing");
    }

    if (log.isDebugEnabled()) {
      log.debug("Request for " + method + "() from " + ctx.getCallerIP());
    }

    if (!HAUtil.isMasterNode()) {
      log.warn("Non-primary server received communication from an agent.  Request will be denied.");
      throw new LatherRemoteException(
          "This server is not the primary node in the HA configuration. Agent request denied.");
    }

    if (secureCommands.contains(method)) {
      if (!(arg instanceof SecureAgentLatherValue)) {
        log.warn(
            "Authenticated call made from "
                + ctx.getCallerIP()
                + " which did not subclass the correct authentication class");
        throw new LatherRemoteException("Unauthorized agent denied");
      }

      String agentToken = ((SecureAgentLatherValue) arg).getAgentToken();
      validateAgent(ctx, agentToken);
      synchronized (tokensToTime) {
        tokensToTime.put(agentToken, System.currentTimeMillis());
      }
      try {
        Agent a = agentManager.getAgent(agentToken);
        agentId = a.getId();
      } catch (AgentNotFoundException e) {
        log.debug(e, e);
      }
    }

    AgentConnection conn = null;
    long start = 0;
    try {
      conn = agentManager.getAgentConnection(method, ctx.getCallerIP(), agentId);
      start = now();
      return runCommand(ctx, method, arg);
    } catch (LatherRemoteException e) {
      concurrentStatsCollector.addStat(1, LATHER_REMOTE_EXCEPTION);
      throw e;
    } finally {
      if (conn != null) {
        agentManager.disconnectAgent(conn);
      }
      long duration = now() - start;
      concurrentStatsCollector.addStat(duration, LATHER_RUN_COMMAND_TIME);
    }
  }
 protected String generateCacheKey() {
   boolean isVisitor = true;
   String role = "V";
   String principalName = ctx.getPrincipal().getName();
   try {
     if (getPage().isAdministrator(principalName)) {
       role = "A";
     } else if (getPage().isContributor(principalName)) {
       role = "C";
     }
   } catch (ClientException e) {
     LOG.warn(
         "Unable to get site adapter while generating cache key so cached page is used: "
             + e.getMessage());
   }
   if (isVisitor) {
     String dateStr = "";
     try {
       dateStr =
           Long.toString(
               ((Calendar) getDocument().getPropertyValue("dc:modified")).getTime().getTime());
     } catch (PropertyException e) {
       LOG.warn("Unable to get property 'dc:modified': " + e.getMessage());
     } catch (ClientException e) {
       LOG.warn("Unable to get property 'dc:modified': " + e.getMessage());
     }
     return generateCacheName() + "-" + getPath() + "-" + role + "-" + dateStr;
   } else {
     return CacheBlock.NO_CACHE_KEY;
   }
 }
 /**
  * endTask() can fail and the only way to recover out of it is for the {@link
  * org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
  *
  * @param slt the split log task that is being completed
  * @param ctr counter of completed tasks, incremented when the transition succeeds
  */
 @Override
 public void endTask(SplitLogTask slt, AtomicLong ctr, SplitTaskDetails details) {
   ZkSplitTaskDetails zkDetails = (ZkSplitTaskDetails) details;
   String task = zkDetails.getTaskNode();
   int taskZKVersion = zkDetails.getCurTaskZKVersion().intValue();
   try {
     if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) {
       LOG.info("successfully transitioned task " + task + " to final state " + slt);
       ctr.incrementAndGet();
       return;
     }
     LOG.warn(
         "failed to transition task "
             + task
             + " to end state "
             + slt
             + " because of version mismatch");
   } catch (KeeperException.BadVersionException bve) {
     LOG.warn(
         "transition task " + task + " to " + slt + " failed because of version mismatch", bve);
   } catch (KeeperException.NoNodeException e) {
     LOG.fatal(
         "logic error - end task " + task + " " + slt + " failed because task doesn't exist", e);
   } catch (KeeperException e) {
     LOG.warn("failed to end task, " + task + " " + slt, e);
   }
   SplitLogCounters.tot_wkr_final_transition_failed.incrementAndGet();
 }
Example #8
  @WebMethod
  @Path("/setProperty")
  @Produces("text/plain")
  @GET
  public String setProperty(
      @WebParam(name = "sessionid", partName = "sessionid") @QueryParam("sessionid")
          String sessionid,
      @WebParam(name = "propName", partName = "propName") @QueryParam("propName") String propName,
      @WebParam(name = "propType", partName = "propType") @QueryParam("propType") String propType,
      @WebParam(name = "propValue", partName = "propValue") @QueryParam("propValue")
          String propValue) {
    if (StringUtils.isBlank(sessionid)
        || StringUtils.isBlank(propName)
        || StringUtils.isBlank(propType)
        || StringUtils.isBlank(propValue)) {
      LOG.warn("IllegalArgument: One or more of the parameters were empty or null");
      throw new RuntimeException(
          "IllegalArgument: One or more of the parameters were empty or null");
    }

    Session session = establishSession(sessionid);
    if (!securityService.isSuperUser()) {
      LOG.warn("NonSuperUser trying to adjust configuration: " + session.getUserId());
      throw new RuntimeException(
          "NonSuperUser trying to adjust configuration: " + session.getUserId());
    }
    return changeConfigValue(propName, propType, propValue);
  }
Example #9
  /**
   * Handle a Magnet or torrent request via a socket (for TCP handling). Deiconify the application,
   * fire the request, and write a reply back as a sign that LimeWire is running.
   */
  public void fireControlThread(Socket socket, boolean magnet) {
    LOG.trace("enter fireControl");

    Thread.currentThread().setName("IncomingControlThread");
    try {
      // Only allow control from localhost
      if (!NetworkUtils.isLocalHost(socket)) {
        if (LOG.isWarnEnabled())
          LOG.warn("Invalid control request from: " + socket.getInetAddress().getHostAddress());
        return;
      }

      // First read extra parameter
      socket.setSoTimeout(Constants.TIMEOUT);
      ByteReader br = new ByteReader(socket.getInputStream());
      // read the first line of the request
      String line = br.readLine();
      socket.setSoTimeout(0);

      BufferedOutputStream out = new BufferedOutputStream(socket.getOutputStream());
      String s = CommonUtils.getUserName() + "\r\n";
      // system internal, so use system encoding
      byte[] bytes = s.getBytes();
      out.write(bytes);
      out.flush();
      if (magnet) handleMagnetRequest(line);
      else handleTorrentRequest(line);
    } catch (IOException e) {
      LOG.warn("Exception while responding to control request", e);
    } finally {
      IOUtils.close(socket);
    }
  }
  /**
   * Inspect the log directory to recover any log file without
   * an active region server.
   */
  void splitLogAfterStartup() {
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
        HLog.SPLIT_SKIP_ERRORS_DEFAULT);
    Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
    do {
      if (master.isStopped()) {
        LOG.warn("Master stopped while splitting logs");
        break;
      }
      List<ServerName> serverNames = new ArrayList<ServerName>();
      try {
        if (!this.fs.exists(logsDirPath)) return;
        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
        // Get online servers after getting log folders to avoid log folder deletion of newly
        // checked-in region servers. See HBASE-5916.
        Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
            .keySet();

        if (logFolders == null || logFolders.length == 0) {
          LOG.debug("No log files to split, proceeding...");
          return;
        }
        for (FileStatus status : logFolders) {
          String sn = status.getPath().getName();
          // truncate splitting suffix if present (for ServerName parsing)
          if (sn.endsWith(HLog.SPLITTING_EXT)) {
            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
          }
          ServerName serverName = ServerName.parseServerName(sn);
          if (!onlineServers.contains(serverName)) {
            LOG.info("Log folder " + status.getPath() + " doesn't belong "
                + "to a known region server, splitting");
            serverNames.add(serverName);
          } else {
            LOG.info("Log folder " + status.getPath()
                + " belongs to an existing region server");
          }
        }
        splitLog(serverNames);
        retrySplitting = false;
      } catch (IOException ioe) {
        LOG.warn("Failed splitting of " + serverNames, ioe);
        if (!checkFileSystem()) {
          LOG.warn("Bad Filesystem, exiting");
          Runtime.getRuntime().halt(1);
        }
        try {
          if (retrySplitting) {
            Thread.sleep(conf.getInt(
              "hbase.hlog.split.failure.retry.interval", 30 * 1000));
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted, aborting since cannot return w/o splitting");
          Thread.currentThread().interrupt();
          retrySplitting = false;
          Runtime.getRuntime().halt(1);
        }
      }
    } while (retrySplitting);
  }
  @Override
  protected void doOpen() throws Exception {
    Assert.notNull(resource, "Input resource must be set");
    Assert.notNull(recordSeparatorPolicy, "RecordSeparatorPolicy must be set");

    noInput = true;
    if (!resource.exists()) {
      if (strict) {
        throw new IllegalStateException(
            "Input resource must exist (reader is in 'strict' mode): " + resource);
      }
      logger.warn("Input resource does not exist " + resource.getDescription());
      return;
    }

    if (!resource.isReadable()) {
      if (strict) {
        throw new IllegalStateException(
            "Input resource must be readable (reader is in 'strict' mode): " + resource);
      }
      logger.warn("Input resource is not readable " + resource.getDescription());
      return;
    }

    reader = bufferedReaderFactory.create(resource, encoding);
    for (int i = 0; i < linesToSkip; i++) {
      String line = readLine();
      if (skippedLinesCallback != null) {
        skippedLinesCallback.handleLine(line);
      }
    }
    noInput = false;
  }
Example #12
  public void start(ResourceContext context) throws Exception {
    this.context = context;
    log.debug(
        "Starting connection to "
            + context.getResourceType()
            + "["
            + context.getResourceKey()
            + "]...");

    // If connecting to the EMS fails, log a warning but still succeed in starting;
    // getAvailability() will keep trying to connect each time it is called.
    try {
      internalStart();
    } catch (Exception e) {
      log.warn(
          "Failed to connect to "
              + context.getResourceType()
              + "["
              + context.getResourceKey()
              + "].",
          e);
    }

    if (connection == null) {
      log.warn(
          "Unable to connect to "
              + context.getResourceType()
              + "["
              + context.getResourceKey()
              + "].");
    }
  }
Example #13
  /**
   * Get PID from a pid-file.
   *
   * @param pidFileName Name of the pid-file.
   * @return the PID string read from the pid-file. Returns null if the pidFileName points to a
   *     non-existing file or if read fails from the file.
   */
  public static String getPidFromPidFile(String pidFileName) {
    BufferedReader pidFile = null;
    FileReader fReader = null;
    String pid = null;

    try {
      fReader = new FileReader(pidFileName);
      pidFile = new BufferedReader(fReader);
    } catch (FileNotFoundException f) {
      LOG.debug("PidFile doesn't exist : " + pidFileName);
      return pid;
    }

    try {
      pid = pidFile.readLine();
    } catch (IOException i) {
      LOG.error("Failed to read from " + pidFileName);
    } finally {
      try {
        if (fReader != null) {
          fReader.close();
        }
        try {
          if (pidFile != null) {
            pidFile.close();
          }
        } catch (IOException i) {
          LOG.warn("Error closing the stream " + pidFile);
        }
      } catch (IOException i) {
        LOG.warn("Error closing the stream " + fReader);
      }
    }
    return pid;
  }
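A minimal usage sketch for the helper above, written as if it sat in the same class (the pid-file path and the log messages are illustrative assumptions, not part of the original code):

  public static void logServicePid() {
    // hypothetical pid-file path; getPidFromPidFile() returns null if it is missing or unreadable
    String pid = getPidFromPidFile("/var/run/myservice.pid");
    if (pid == null) {
      LOG.warn("Could not determine the PID; the service may not be running");
    } else {
      LOG.info("Service is running with PID " + pid);
    }
  }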
Example #14
 private List prepareReferenceList(List attachmentList) {
   List list = new ArrayList();
   if (attachmentList == null) {
     return list;
   }
   for (int i = 0; i < attachmentList.size(); i++) {
     ContentResource cr = null;
     AttachmentIfc attach = (AttachmentIfc) attachmentList.get(i);
     try {
       log.debug("*** resourceId=" + attach.getResourceId());
       cr = AssessmentService.getContentHostingService().getResource(attach.getResourceId());
     } catch (PermissionException e) {
       log.warn(
           "ContentHostingService.getResource() throws PermissionException=" + e.getMessage());
     } catch (IdUnusedException e) {
       log.warn("ContentHostingService.getResource() throws IdUnusedException=" + e.getMessage());
       // <-- bad sign, some left over association of question and resource,
       // use case: user remove resource in file picker, then exit modification without
       // proper cancellation by clicking at the left nav instead of "cancel".
       // Also in this use case, any added resource would be left orphan.
       AssessmentService assessmentService = new AssessmentService();
       assessmentService.removeItemAttachment(attach.getAttachmentId().toString());
     } catch (TypeException e) {
       log.warn("ContentHostingService.getResource() throws TypeException=" + e.getMessage());
     }
     if (cr != null) {
       Reference ref = EntityManager.newReference(cr.getReference());
       log.debug("*** ref=" + ref);
       if (ref != null) list.add(ref);
     }
   }
   return list;
 }
Example #15
  /**
   * Uses reflection to obtain the actual type of the generic parameter declared on the superclass
   * of the given class.
   *
   * @param clazz the class to introspect
   * @param index the index of the generic declaration, starting from 0
   * @return the class of the generic parameter at the given index, or <code>Object.class</code> if
   *     it cannot be determined
   */
  public static Class getSuperClassGenricType(Class clazz, int index) {

    Type genType = clazz.getGenericSuperclass();

    if (!(genType instanceof ParameterizedType)) {
      log.warn(clazz.getSimpleName() + "'s superclass is not a ParameterizedType");
      return Object.class;
    }

    Type[] params = ((ParameterizedType) genType).getActualTypeArguments();

    if (index >= params.length || index < 0) {
      log.warn(
          "Index: "
              + index
              + ", Size of "
              + clazz.getSimpleName()
              + "'s Parameterized Type: "
              + params.length);
      return Object.class;
    }
    if (!(params[index] instanceof Class)) {
      log.warn(
          clazz.getSimpleName() + " does not set an actual class on its superclass generic parameter");
      return Object.class;
    }
    return (Class) params[index];
  }
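A short sketch of the pattern this helper typically supports: a generic base class resolving the entity type declared by its subclass. BaseDao, UserDao, and User are illustrative assumptions, and getSuperClassGenricType(...) is assumed to be statically imported or otherwise in scope.

abstract class BaseDao<T> {
  // resolves the actual type argument, e.g. UserDao extends BaseDao<User> yields User.class
  @SuppressWarnings("unchecked")
  protected final Class<T> entityClass = (Class<T>) getSuperClassGenricType(getClass(), 0);
}

class User {}

class UserDao extends BaseDao<User> {
  // entityClass == User.class here, because UserDao's generic superclass is BaseDao<User>
}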
Example #16
 /**
  * Computes the default Hadoop configuration path.
  *
  * @param environmentVariables the current environment variables
  * @return the detected configuration path, or {@code null} if the configuration path is not found
  * @since 0.6.0
  */
 public static URL getConfigurationPath(Map<String, String> environmentVariables) {
   if (environmentVariables == null) {
     throw new IllegalArgumentException("environmentVariables must not be null"); // $NON-NLS-1$
   }
   File conf = getConfigurationDirectory(environmentVariables);
   if (conf == null) {
     // show warning only the first time
     if (SAW_HADOOP_CONF_MISSING.compareAndSet(false, true)) {
       LOG.warn("Hadoop configuration path is not found");
     }
     return null;
   }
   if (conf.isDirectory() == false) {
     LOG.warn(
         MessageFormat.format(
             "Failed to load default Hadoop configurations ({0} is not a valid installation path)",
             conf));
     return null;
   }
   try {
     return conf.toURI().toURL();
   } catch (MalformedURLException e) {
     LOG.warn(
         MessageFormat.format(
             "Failed to load default Hadoop configurations ({0} cannot be converted to a URL)",
             conf),
         e);
     return null;
   }
 }
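A minimal usage sketch, written as if it sat in the same class as the helper above; calling it with System.getenv() and printing the result are assumptions about typical use, not part of the original code.

  public static void main(String[] args) {
    URL confDir = getConfigurationPath(System.getenv());
    if (confDir == null) {
      System.err.println("Hadoop configuration directory not found; using built-in defaults");
    } else {
      // the individual *-site.xml files are expected to live beneath this directory
      System.out.println("Using Hadoop configuration at " + confDir);
    }
  }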
Example #17
 @Override
 public void run() {
   boolean outputFailed = false;
   try {
     InputStream in = input;
     OutputStream out = output;
     byte[] buf = new byte[256];
     while (true) {
       int read = in.read(buf);
       if (read == -1) {
         break;
       }
       if (outputFailed == false) {
         try {
           out.write(buf, 0, read);
         } catch (IOException e) {
           outputFailed = true;
           LOG.warn("Failed to redirect stdout of subprocess", e);
         }
       }
     }
   } catch (IOException e) {
     LOG.warn("Failed to redirect stdio of subprocess", e);
   }
 }
Example #18
  public String readCGCell(Row row, int cellIdx, DataDto dto, CellType cellType) {
    Cell cell = row.getCell(cellIdx);
    if (null == cell) {
      log.warn(
          "Table import. Table "
              + metaData.getTableName()
              + " Cell at row: "
              + (row.getRowNum() + 1)
              + " col: "
              + (cellIdx + 1)
              + " current value: "
              + dto.getConfidenceGrade()
              + " cell is null.");
      return null;
    }
    if (cell.getCellType() != Cell.CELL_TYPE_STRING) {
      log.warn(
          "Table import. Table "
              + metaData.getTableName()
              + " Cell at row: "
              + (row.getRowNum() + 1)
              + " col: "
              + (cellIdx + 1)
              + " current value: "
              + dto.getConfidenceGrade()
              + " CG cell is not of type String.");
      return null;
    }

    RichTextString cg = cell.getRichStringCellValue();
    return cg.getString();
  }
  /**
   * Actually performs context closing: publishes a ContextClosedEvent and destroys the singletons
   * in the bean factory of this application context.
   *
   * <p>Called by both {@code close()} and a JVM shutdown hook, if any.
   *
   * @see org.springframework.context.event.ContextClosedEvent
   * @see #destroyBeans()
   * @see #close()
   * @see #registerShutdownHook()
   */
  protected void doClose() {
    if (this.active.get() && this.closed.compareAndSet(false, true)) {
      if (logger.isInfoEnabled()) {
        logger.info("Closing " + this);
      }

      LiveBeansView.unregisterApplicationContext(this);

      try {
        // Publish shutdown event.
        publishEvent(new ContextClosedEvent(this));
      } catch (Throwable ex) {
        logger.warn("Exception thrown from ApplicationListener handling ContextClosedEvent", ex);
      }

      // Stop all Lifecycle beans, to avoid delays during individual destruction.
      try {
        getLifecycleProcessor().onClose();
      } catch (Throwable ex) {
        logger.warn("Exception thrown from LifecycleProcessor on context close", ex);
      }

      // Destroy all cached singletons in the context's BeanFactory.
      destroyBeans();

      // Close the state of this context itself.
      closeBeanFactory();

      // Let subclasses do some final clean-up if they wish...
      onClose();

      this.active.set(false);
    }
  }
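A brief sketch of how doClose() is normally reached from application code: it runs via close() or the registered JVM shutdown hook, and the closed compare-and-set above makes a second invocation a no-op. The XML configuration file name below is an example, not part of the original code.

import org.springframework.context.support.ClassPathXmlApplicationContext;

public class ContextShutdownExample {
  public static void main(String[] args) {
    ClassPathXmlApplicationContext ctx =
        new ClassPathXmlApplicationContext("applicationContext.xml"); // example config name
    ctx.registerShutdownHook(); // doClose() also runs once if the JVM exits abruptly
    try {
      // ... use beans from the context ...
    } finally {
      ctx.close(); // triggers doClose(); the shutdown hook then finds the context already closed
    }
  }
}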
  /* ------------------------------------------------------------ */
  public Object invoke(String name, Object[] params, String[] signature)
      throws MBeanException, ReflectionException {
    if (log.isDebugEnabled()) log.debug("invoke " + name);

    String methodKey = name + "(";
    if (signature != null)
      for (int i = 0; i < signature.length; i++) methodKey += (i > 0 ? "," : "") + signature[i];
    methodKey += ")";

    try {
      Method method = (Method) _method.get(methodKey);
      if (method == null) throw new NoSuchMethodException(methodKey);

      Object o = _object;
      if (method.getDeclaringClass().isInstance(this)) o = this;
      return method.invoke(o, params);
    } catch (NoSuchMethodException e) {
      log.warn(LogSupport.EXCEPTION, e);
      throw new ReflectionException(e);
    } catch (IllegalAccessException e) {
      log.warn(LogSupport.EXCEPTION, e);
      throw new MBeanException(e);
    } catch (InvocationTargetException e) {
      log.warn(LogSupport.EXCEPTION, e);
      throw new ReflectionException((Exception) e.getTargetException());
    }
  }
Example #21
 /**
  * Cancel grant on a node
  *
  * @param nodeName the node the grant is on
  * @param sessionId the session the grant was given to
  * @param requestId the request this grant satisfied
  */
 public void cancelGrant(String nodeName, String sessionId, int requestId) {
   ClusterNode node = nameToNode.get(nodeName);
   if (node == null) {
     LOG.warn("Canceling grant for non-existent node: " + nodeName);
     return;
   }
   synchronized (node) {
     if (node.deleted) {
       LOG.warn("Canceling grant for deleted node: " + nodeName);
       return;
     }
     String hoststr = node.getClusterNodeInfo().getAddress().getHost();
     if (!canAllowNode(hoststr)) {
       LOG.warn("Canceling grant for excluded node: " + hoststr);
       return;
     }
     ResourceRequestInfo req = node.getRequestForGrant(sessionId, requestId);
     if (req != null) {
       ResourceRequest unitReq = Utilities.getUnitResourceRequest(req.getType());
       boolean previouslyRunnable = node.checkForGrant(unitReq, resourceLimit);
       node.cancelGrant(sessionId, requestId);
       loadManager.decrementLoad(req.getType());
       if (!previouslyRunnable && node.checkForGrant(unitReq, resourceLimit)) {
         RunnableIndices r = typeToIndices.get(req.getType());
         if (!faultManager.isBlacklisted(node.getName(), req.getType())) {
           r.addRunnable(node);
         }
       }
     }
   }
 }
Example #22
  /**
   * Downloads an S3Object, as returned from {@link
   * AmazonS3Client#getObject(com.amazonaws.services.s3.model.GetObjectRequest)}, to the specified
   * file.
   *
   * @param s3Object The S3Object containing a reference to an InputStream containing the object's
   *     data.
   * @param destinationFile The file to store the object's data in.
   * @param performIntegrityCheck Boolean value indicating whether or not to perform the integrity
   *     check
   */
  public static void downloadObjectToFile(
      S3Object s3Object, File destinationFile, boolean performIntegrityCheck) {

    // attempt to create the parent if it doesn't exist
    File parentDirectory = destinationFile.getParentFile();
    if (parentDirectory != null && !parentDirectory.exists()) {
      parentDirectory.mkdirs();
    }

    OutputStream outputStream = null;
    try {
      outputStream = new BufferedOutputStream(new FileOutputStream(destinationFile));
      byte[] buffer = new byte[1024 * 10];
      int bytesRead;
      while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) {
        outputStream.write(buffer, 0, bytesRead);
      }
    } catch (IOException e) {
      try {
        s3Object.getObjectContent().abort();
      } catch (IOException abortException) {
        log.warn("Couldn't abort stream", abortException);
      }
      throw new AmazonClientException(
          "Unable to store object contents to disk: " + e.getMessage(), e);
    } finally {
      try {
        outputStream.close();
      } catch (Exception e) {
      }
      try {
        s3Object.getObjectContent().close();
      } catch (Exception e) {
      }
    }

    byte[] clientSideHash = null;
    byte[] serverSideHash = null;
    try {
      // Multipart Uploads don't have an MD5 calculated on the service side
      if (ServiceUtils.isMultipartUploadETag(s3Object.getObjectMetadata().getETag()) == false) {
        clientSideHash = Md5Utils.computeMD5Hash(new FileInputStream(destinationFile));
        serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
      }
    } catch (Exception e) {
      log.warn("Unable to calculate MD5 hash to validate download: " + e.getMessage(), e);
    }

    if (performIntegrityCheck
        && clientSideHash != null
        && serverSideHash != null
        && !Arrays.equals(clientSideHash, serverSideHash)) {
      throw new AmazonClientException(
          "Unable to verify integrity of data download.  "
              + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
              + "The data stored in '"
              + destinationFile.getAbsolutePath()
              + "' may be corrupt.");
    }
  }
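A minimal usage sketch, written as if it sat next to the helper above; the bucket, key, and local path are placeholders, and AmazonS3Client/GetObjectRequest come from the AWS SDK for Java v1 referenced in the javadoc.

  public static void downloadBackupExample() {
    AmazonS3Client s3 = new AmazonS3Client(); // credentials resolved via the SDK's default provider chain
    S3Object object = s3.getObject(new GetObjectRequest("my-bucket", "backups/data.bin"));
    downloadObjectToFile(object, new File("/tmp/data.bin"), true);
  }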
  @Override
  public void doFilter(
      ServletRequest servletRqst, ServletResponse servletResponse, FilterChain filterChain)
      throws IOException, ServletException {
    // First look for a session token in the header or as a parameter
    HttpServletRequest req = (HttpServletRequest) servletRqst;
    String sessionToken = req.getHeader(AuthorizationConstants.SESSION_TOKEN_PARAM);
    if (sessionToken == null) {
      // Check for a session token as a parameter
      sessionToken = req.getParameter(AuthorizationConstants.SESSION_TOKEN_PARAM);
    }

    // Determine the caller's identity
    String username = null;

    // A session token maps to a specific user
    if (sessionToken != null) {
      try {
        String userId = authenticationService.revalidate(sessionToken);
        username = authenticationService.getUsername(userId);
      } catch (Exception xee) {
        String reason = "The session token is invalid.";
        reject(req, (HttpServletResponse) servletResponse, reason);
        log.warn("invalid session token", xee);
        return;
      }

      // If there is no session token, then check for a HMAC signature
    } else if (isSigned(req)) {
      username = req.getHeader(AuthorizationConstants.USER_ID_HEADER);
      try {
        String secretKey = authenticationService.getSecretKey(username);
        matchHMACSHA1Signature(req, secretKey);
      } catch (UnauthorizedException e) {
        reject(req, (HttpServletResponse) servletResponse, e.getMessage());
        log.warn("Invalid HMAC signature", e);
        return;
      } catch (NotFoundException e) {
        reject(req, (HttpServletResponse) servletResponse, e.getMessage());
        log.warn("Invalid HMAC signature", e);
        return;
      }
    }
    if (username == null && !allowAnonymous) {
      String reason = "The session token provided was missing, invalid or expired.";
      reject(req, (HttpServletResponse) servletResponse, reason);
      log.warn("Anonymous not allowed");
      return;
    }
    if (username == null) {
      username = AuthorizationConstants.ANONYMOUS_USER_ID;
    }

    // Pass along, including the user ID
    @SuppressWarnings("unchecked")
    Map<String, String[]> modParams = new HashMap<String, String[]>(req.getParameterMap());
    modParams.put(AuthorizationConstants.USER_ID_PARAM, new String[] {username});
    HttpServletRequest modRqst = new ModParamHttpServletRequest(req, modParams);
    filterChain.doFilter(modRqst, servletResponse);
  }
Example #24
 @Override
 protected void chore() {
   try {
     FileStatus[] files = this.fs.listStatus(this.oldLogDir);
     int nbDeletedLog = 0;
     FILE:
     for (FileStatus file : files) {
       Path filePath = file.getPath();
       if (HLog.validateHLogFilename(filePath.getName())) {
         for (LogCleanerDelegate logCleaner : logCleanersChain) {
           if (!logCleaner.isLogDeletable(filePath)) {
             // this log is not deletable, continue to process next log file
             continue FILE;
           }
         }
         // delete this log file if it passes all the log cleaners
         this.fs.delete(filePath, true);
         nbDeletedLog++;
       } else {
         LOG.warn("Found a wrongly formatted file: " + file.getPath().getName());
         this.fs.delete(filePath, true);
         nbDeletedLog++;
       }
       if (nbDeletedLog >= maxDeletedLogs) {
         break;
       }
     }
   } catch (IOException e) {
     e = RemoteExceptionHandler.checkIOException(e);
     LOG.warn("Error while cleaning the logs", e);
   }
 }
Example #25
  /**
   * Gets the message type based on the content type headers.
   *
   * @param msg The message to extract the type from.
   * @return MDN if the message is an MDN message (<a href="http://tools.ietf.org/html/rfc3798">RFC
   *     3798</a>)<br>
   *     DSN if the message is a DSN message (<a href="http://tools.ietf.org/html/rfc3464">RFC
   *     3464</a>)<br>
   *     SMIME if the message is S/MIME encrypted<br>
   *     IMF for all other message types.<br>
   *     UNKNOWN if an error occurs.
   */
  public static TxMessageType getMessageType(MimeMessage msg) {
    try {
      ContentType contentType = new ContentType(msg.getContentType());

      if (contentType.match(MDNStandard.MediaType.ReportMessage)
          && contentType.getParameter(MDNStandard.MediaType.ReportType) != null) {

        if (contentType
            .getParameter(MDNStandard.MediaType.ReportType)
            .equalsIgnoreCase(MDNStandard.MediaType.ReportTypeValueNotification))
          return TxMessageType.MDN;
        else if (contentType
            .getParameter(DSNStandard.MediaType.ReportType)
            .equalsIgnoreCase(DSNStandard.MediaType.ReportTypeValueDelivery))
          return TxMessageType.DSN;
      } else if (contentType.match(SMIMEStandard.EncryptedContentMediaType)
          || contentType.match(SMIMEStandard.EncryptedContentMediaTypeAlternative)) {
        return TxMessageType.SMIME;
      }

      return TxMessageType.IMF;
    }
    /// CLOVER:OFF
    catch (ParseException e) {
      LOGGER.warn("Failed to discern message type.", e);
    } catch (MessagingException e) {
      LOGGER.warn("Failed to discern message type.", e);
    }
    return TxMessageType.UNKNOWN;
    /// CLOVER:ON
  }
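A minimal usage sketch, written as if it sat in the same class as the helper above; "message.eml" is a placeholder file name, and Session, Properties, and the java.io stream classes are additional imports assumed here.

  public static void classifyMessageExample() throws MessagingException, IOException {
    Session session = Session.getDefaultInstance(new Properties());
    try (InputStream in = new FileInputStream("message.eml")) {
      MimeMessage msg = new MimeMessage(session, in);
      if (getMessageType(msg) == TxMessageType.MDN) {
        LOGGER.info("Message is a disposition notification (RFC 3798)");
      }
    }
  }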
Example #26
 @Override
 public Object visit(MapProperty property, Object arg) throws PropertyException {
   Object value = null;
   if (property.isContainer()) {
     value = new JSONObject();
   } else {
     value = property.getValue();
   }
   if (property instanceof BlobProperty || property.getParent() instanceof BlobProperty) {
     log.warn(
         "Property '"
             + property.getName()
             + "' ignored during serialization. Blob and blob related properties are not written to json object.");
   } else if (property.getParent().isList()) {
     ((JSONArray) arg).add(value);
   } else {
     try {
       ((JSONObject) arg).put(property.getField().getName().getPrefixedName(), value);
     } catch (JSONException e) {
       throw new PropertyException("Failed to put value", e);
     }
   }
   return value;
 }
  @Test
  public void testAnyToPDFConverter() throws Exception {
    ConversionService cs = Framework.getLocalService(ConversionService.class);
    ConverterCheckResult check = cs.isConverterAvailable("any2pdf");
    assertNotNull(check);
    if (!check.isAvailable()) {
      log.warn("Skipping JOD based converter tests since OOo is not installed");
      log.warn("  converter check output : " + check.getInstallationMessage());
      log.warn("  converter check output : " + check.getErrorMessage());
      return;
    }

    doTestPDFConverter("text/html", "hello.html");
    //        doTestPDFConverter("text/xml", "hello.xml");
    doTestPDFConverter("application/vnd.ms-excel", "hello.xls");
    doTestPDFConverter("application/vnd.sun.xml.writer", "hello.sxw");
    doTestPDFConverter("application/vnd.oasis.opendocument.text", "hello.odt");
    doTestPDFConverter("application/vnd.sun.xml.calc", "hello.sxc");
    doTestPDFConverter("application/vnd.oasis.opendocument.spreadsheet", "hello.ods");
    doTestPDFConverter("application/vnd.sun.xml.impress", "hello.sxi");
    doTestPDFConverter("application/vnd.oasis.opendocument.presentation", "hello.odp");

    doTestPDFConverter(
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "hello.docx");
    doTestPDFConverter(
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "hello.xlsx");
    doTestPDFConverter(
        "application/vnd.openxmlformats-officedocument.presentationml.presentation", "hello.pptx");
  }
Example #28
 /**
  * Adds column to RR, checking for duplicate columns. Needed because CBO cannot handle the Hive
  * behavior of blindly overwriting old mapping in RR and still somehow working after that.
  *
  * @return True if mapping was added without duplicates.
  */
 public boolean putWithCheck(
     String tabAlias, String colAlias, String internalName, ColumnInfo newCI)
     throws SemanticException {
   ColumnInfo existing = get(tabAlias, colAlias);
   // Hive adds the same mapping twice... I wish we could fix stuff like that.
   if (existing == null) {
     put(tabAlias, colAlias, newCI);
     return true;
   } else if (existing.isSameColumnForRR(newCI)) {
     return true;
   }
   LOG.warn(
       "Found duplicate column alias in RR: "
           + existing.toMappingString(tabAlias, colAlias)
           + " adding "
           + newCI.toMappingString(tabAlias, colAlias));
   if (internalName != null) {
     existing = get(tabAlias, internalName);
     if (existing == null) {
       put(tabAlias, internalName, newCI);
       return true;
     } else if (existing.isSameColumnForRR(newCI)) {
       return true;
     }
     LOG.warn(
         "Failed to use internal name after finding a duplicate: "
             + existing.toMappingString(tabAlias, internalName));
   }
   return false;
 }
Example #29
  @Override
  public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
    ByteBuffer storedBlock;

    try {
      storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
    } catch (InterruptedException e) {
      LOG.warn("SlabAllocator was interrupted while waiting for block to become available", e);
      return;
    }

    CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(), storedBlock);
    toBeCached.serialize(storedBlock);

    synchronized (this) {
      CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);

      if (alreadyCached != null) {
        backingStore.free(storedBlock);
        throw new RuntimeException("already cached " + blockName);
      }
      if (actionWatcher != null) {
        actionWatcher.onInsertion(blockName, this);
      }
    }
    newEntry.recentlyAccessed.set(System.nanoTime());
    this.size.addAndGet(newEntry.heapSize());
  }
    @Override
    public void run() {
      while (running.get()) {
        switch (random.nextInt(2)) { // pick 0 (start) or 1 (stop)
          case 0: // start a server
            try {
              cluster.startServer();
            } catch (Exception e) {
              LOG.warn(e);
              exception.compareAndSet(null, e);
            }
            break;

          case 1: // stop a server
            try {
              cluster.stopRandomServer();
            } catch (Exception e) {
              LOG.warn(e);
              exception.compareAndSet(null, e);
            }
            break;
          default:
            break;
        }

        Threads.sleep(100);
      }
    }