@Service public abstract class FantasyGame { private static final Log LOG = LogFactory.getLog(FantasyGame.class); protected FantasyTeam fantasyTeam; public FantasyGame() {} public void tradeForTomorrow() throws Exception { tradeForDate(DateUtil.getGameTomorrow()); } public void tradeForDate(Date date) throws Exception { BbcLeague bbcLeague = getLeague(date); Starters expectedStarters = fantasyTeam.getStrategy().pickStarters(date, bbcLeague); Starters actualStarters = tradeForStarters(expectedStarters); boolean startersSet = actualStarters.equals(expectedStarters); LOG.info("starters look good - " + startersSet); } public abstract BbcLeague getLeague(Date date) throws IOException; public abstract Starters tradeForStarters(Starters starters) throws IOException; }
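FantasyGame is a template: a concrete game only has to supply the league lookup and the actual trade. The sketch below shows what such a subclass could look like; BbcFantasyGame is an invented name, and the site-specific I/O is left as placeholder comments because it is not shown in the source.

@Service
public class BbcFantasyGame extends FantasyGame {

    @Override
    public BbcLeague getLeague(Date date) throws IOException {
        // fetch and parse the league standings for the given date (site-specific code not shown)
        throw new UnsupportedOperationException("league scraping not shown in the source");
    }

    @Override
    public Starters tradeForStarters(Starters starters) throws IOException {
        // submit the expected lineup and return the lineup the site actually accepted
        throw new UnsupportedOperationException("trade submission not shown in the source");
    }
}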
/** * Created on 28/02/2013 (12:46:06) * * @author Ana Andres */ public class SeeMessageSelectedColorsAction extends BaseAction { private static Log log = LogFactory.getLog(SeeMessageSelectedColorsAction.class); @Inject public Tab tab; public void execute() throws Exception { int[] selected = getTab().getSelected(); // test the old method Map[] selectedKeys = getTab().getSelectedKeys(); if (selected == null || selectedKeys == null) return; String m = ""; String o = ""; for (int i = 0; i < selected.length; i++) m += "[" + selected[i] + "]"; for (int i = 0; i < selectedKeys.length; i++) o += "[" + selectedKeys[i] + "]"; addMessage("color_selected_old", m); addMessage("color_selected_new", "'" + o + "'"); } public Tab getTab() { return tab; } public void setTab(Tab tab) { this.tab = tab; } }
private void logSomething(boolean expectedDebug) { Log log = LogFactory.getLog(Object.class); log.warn("Warning message."); log.debug("Debug message."); log.error("Error message."); log.error("Error with Exception.", new Exception("Test exception.")); assertEquals(expectedDebug, log.isDebugEnabled()); }
/** * A helper to load the native hadoop code, i.e. libhadoop.so. This handles the fallback to either * the bundled libhadoop-Linux-i386-32.so or the default java implementations where appropriate. */ public class NativeCodeLoader { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.util.NativeCodeLoader"); private static boolean nativeCodeLoaded = false; static { // Try to load native hadoop library and set fallback flag appropriately LOG.debug("Trying to load the custom-built native-hadoop library..."); try { System.loadLibrary("hadoop"); LOG.info("Loaded the native-hadoop library"); nativeCodeLoaded = true; } catch (Throwable t) { // Ignore failure to load LOG.debug("Failed to load native-hadoop with error: " + t); LOG.debug("java.library.path=" + System.getProperty("java.library.path")); } if (!nativeCodeLoaded) { LOG.warn( "Unable to load native-hadoop library for your platform... " + "using builtin-java classes where applicable"); } } /** * Check if native-hadoop code is loaded for this platform. * * @return <code>true</code> if native-hadoop is loaded, else <code>false</code> */ public static boolean isNativeCodeLoaded() { return nativeCodeLoaded; } /** * Return whether native hadoop libraries, if present, can be used for this job. * * @param jobConf job configuration * @return <code>true</code> if native hadoop libraries, if present, can be used for this job; * <code>false</code> otherwise. */ public boolean getLoadNativeLibraries(JobConf jobConf) { return jobConf.getBoolean("hadoop.native.lib", true); } /** * Set whether native hadoop libraries, if present, can be used for this job. * * @param jobConf job configuration * @param loadNativeLibraries can native hadoop libraries be loaded */ public void setLoadNativeLibraries(JobConf jobConf, boolean loadNativeLibraries) { jobConf.setBoolean("hadoop.native.lib", loadNativeLibraries); } }
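A minimal sketch of how a caller might consult NativeCodeLoader before relying on native compression, using only the NativeCodeLoader and JobConf APIs shown above; the map-output compression property is the classic mapred one and is used here purely as an illustration.

JobConf job = new JobConf();
NativeCodeLoader loader = new NativeCodeLoader();

if (NativeCodeLoader.isNativeCodeLoaded() && loader.getLoadNativeLibraries(job)) {
    // native codecs are available to this job, so compressing map output is cheap
    job.setBoolean("mapred.compress.map.output", true);
} else {
    // stick to the builtin-java implementations
    loader.setLoadNativeLibraries(job, false);
}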
// This happens very early - check it. static { try { log = LogFactory.getLog(Servlet.class); } catch (Exception ex) { System.err.println("Exception creating the logger"); System.err.println("Commons logging jar files in WEB-INF/lib/?"); System.err.println(ex.getMessage()); // ex.printStackTrace(System.err) ; } }
/** @author Javier Paniza */ public class DescriptionsListTag extends TagSupport { private static Log log = LogFactory.getLog(DescriptionsListTag.class); private String reference; public int doStartTag() throws JspException { try { HttpServletRequest request = (HttpServletRequest) pageContext.getRequest(); ModuleContext context = (ModuleContext) request.getSession().getAttribute("context"); String viewObject = request.getParameter("viewObject"); viewObject = (viewObject == null || viewObject.equals("")) ? "xava_view" : viewObject; View view = (View) context.get(request, viewObject); MetaReference metaReference = view.getMetaReference(reference).cloneMetaReference(); metaReference.setName(reference); String prefix = request.getParameter("propertyPrefix"); prefix = prefix == null ? "" : prefix; String application = request.getParameter("application"); String module = request.getParameter("module"); String referenceKey = Ids.decorate(application, module, prefix + reference); request.setAttribute(referenceKey, metaReference); String editorURL = "reference.jsp?referenceKey=" + referenceKey + "&onlyEditor=true&frame=false&composite=false&descriptionsList=true"; String editorPrefix = Module.isPortlet() ? "/WEB-INF/jsp/xava/" : "/xava/"; try { pageContext.include(editorPrefix + editorURL); } catch (ServletException ex) { Throwable cause = ex.getRootCause() == null ? ex : ex.getRootCause(); log.error(cause.getMessage(), cause); pageContext.include(editorPrefix + "editors/notAvailableEditor.jsp"); } catch (Exception ex) { log.error(ex.getMessage(), ex); pageContext.include(editorPrefix + "editors/notAvailableEditor.jsp"); } } catch (Exception ex) { log.error(ex.getMessage(), ex); throw new JspException(XavaResources.getString("descriptionsList_tag_error", reference)); } return SKIP_BODY; } public String getReference() { return reference; } public void setReference(String property) { this.reference = property; } }
public static void main(String args[]) throws IOException { Log log = LogFactory.getLog(Client.class); ApplicationContext context = new AnnotationConfigApplicationContext(ClientConfig.class); SalesCalculator salesCalculator = context.getBean(SalesCalculator.class); String[] products = new String[] {"Apple iPad", "Apple iPod", "Apple macBook"}; for (String productName : products) { BigDecimal total = salesCalculator.totalSalesForProduct(productName); log.info("total sales for " + productName + " = $" + total); } }
// UsuariosMunicipio class. // Reads the current user. public class UsuariosMunicipio { private static Log log = LogFactory.getLog(UsuariosMunicipio.class); private static final ThreadLocal municipioUsuario = new ThreadLocal(); private static final ThreadLocal municipioUsuarioInfo = new ThreadLocal(); public static String getMunicipioUsuario() { return (String) municipioUsuario.get(); } public static UserInfo getMunicipioUsuarioInfo() { UserInfo userInfo = (UserInfo) municipioUsuarioInfo.get(); if (userInfo == null) userInfo = new UserInfo(); userInfo.setId(getMunicipioUsuario()); return userInfo; } public static void setMunicipioUsuario(String userName) { municipioUsuario.set(userName); municipioUsuarioInfo.set(null); } public static void setCurrentUserInfo(UserInfo userInfo) { municipioUsuario.set(userInfo.getId()); municipioUsuarioInfo.set(userInfo); } public static void setCurrent(HttpServletRequest request) { Object rundata = request.getAttribute("rundata"); String portalUser = (String) request.getSession().getAttribute("xava.portal.user"); String webUser = (String) request.getSession().getAttribute("xava.user"); String user = portalUser == null ? webUser : portalUser; if (Is.emptyString(user) && rundata != null) { PropertiesManager pmRundata = new PropertiesManager(rundata); try { Object jetspeedUser = pmRundata.executeGet("user"); PropertiesManager pmUser = new PropertiesManager(jetspeedUser); user = (String) pmUser.executeGet("userName"); } catch (Exception ex) { log.warn(XavaResources.getString("warning_get_user"), ex); user = null; } } municipioUsuario.set(user); request.getSession().setAttribute("xava.user", user); municipioUsuarioInfo.set(request.getSession().getAttribute("xava.portal.userinfo")); } }
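Because UsuariosMunicipio keeps the user in ThreadLocals, something has to populate them once per request. A hypothetical servlet filter (not part of the source) could do that by delegating to setCurrent:

public class MunicipioUserFilter implements Filter {

    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        // bind the portal/web user to the current thread before the application code runs
        UsuariosMunicipio.setCurrent((HttpServletRequest) request);
        chain.doFilter(request, response);
    }

    public void init(FilterConfig config) {}

    public void destroy() {}
}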
public class GameTimeoutAction implements V086GameEventHandler { private static Log log = LogFactory.getLog(GameTimeoutAction.class); private static final String desc = "GameTimeoutAction"; private static GameTimeoutAction singleton = new GameTimeoutAction(); public static GameTimeoutAction getInstance() { return singleton; } private int handledCount = 0; private GameTimeoutAction() {} public int getHandledEventCount() { return handledCount; } public String toString() { return desc; } public void handleEvent(GameEvent event, V086Controller.V086ClientHandler clientHandler) { handledCount++; GameTimeoutEvent timeoutEvent = (GameTimeoutEvent) event; KailleraUser player = timeoutEvent.getUser(); KailleraUser user = clientHandler.getUser(); if (player.equals(user)) { log.debug( user + " received timeout event " + timeoutEvent.getTimeoutNumber() + " for " + timeoutEvent.getGame() + ": resending messages..."); clientHandler.resend(timeoutEvent.getTimeoutNumber()); } else { log.debug( user + " received timeout event " + timeoutEvent.getTimeoutNumber() + " from " + player + " for " + timeoutEvent.getGame()); } } }
public class BSONFileOutputFormat<K, V> extends OutputFormat<K, V> { public void checkOutputSpecs(final JobContext context) {} public OutputCommitter getOutputCommitter(final TaskAttemptContext context) { return new MongoOutputCommiter(); } @Override public RecordWriter<K, V> getRecordWriter(final TaskAttemptContext context) { return new BSONFileRecordWriter(context); } public BSONFileOutputFormat() {} private static final Log LOG = LogFactory.getLog(BSONFileOutputFormat.class); }
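A hypothetical job setup that wires BSONFileOutputFormat into a job using the new mapreduce API; the job name and output path are placeholders.

Job job = Job.getInstance(new Configuration(), "bson-export");
job.setOutputFormatClass(BSONFileOutputFormat.class);
FileOutputFormat.setOutputPath(job, new Path("/tmp/bson-out"));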
public class GameDesynchAction implements V086GameEventHandler { private static Log log = LogFactory.getLog(GameDesynchAction.class); private static final String desc = "GameDesynchAction"; // $NON-NLS-1$ private static GameDesynchAction singleton = new GameDesynchAction(); public static GameDesynchAction getInstance() { return singleton; } private int handledCount = 0; private GameDesynchAction() {} public int getHandledEventCount() { return handledCount; } public String toString() { return desc; } public void handleEvent(GameEvent event, V086Controller.V086ClientHandler clientHandler) { handledCount++; GameDesynchEvent desynchEvent = (GameDesynchEvent) event; try { clientHandler.send( new GameChat_Notification( clientHandler.getNextMessageNumber(), EmuLang.getString("GameDesynchAction.DesynchDetected"), desynchEvent.getMessage())); // $NON-NLS-1$ // if (clientHandler.getUser().getStatus() == KailleraUser.STATUS_PLAYING) // clientHandler.getUser().dropGame(); } catch (MessageFormatException e) { log.error( "Failed to construct GameChat_Notification message: " + e.getMessage(), e); // $NON-NLS-1$ } // catch (DropGameException e) // { // log.error("Failed to drop game during desynch: " + e.getMessage(), e); // } } }
/** @author jclopez */ public class AccountEntryValidator implements Validator { private static final Log log = LogFactory.getLog(AccountEntryValidator.class); /** */ public void validate(FacesContext context, UIComponent component, Object value) throws ValidatorException { log.info("validate - value = " + value); if (value != null) { // Check if value is a BigDecimal if (!(value instanceof BigDecimal)) { log.info("validate - value is not a BigDecimal (" + value.getClass().getName() + ")"); throw new ValidatorException( new FacesMessage("Las cantidades monetarias deben ser de tipo BigDecimal")); } // Check if it has no more than 2 decimal digits BigDecimal bd = (BigDecimal) value; if (bd.scale() > 2) { log.info("validate - value has more than 2 decimals (" + value + ")"); throw new ValidatorException( new FacesMessage("Las cantidades monetarias no pueden tener mas de dos decimales")); } AccountEntryBean bean = (AccountEntryBean) FacesUtils.getBean("accountEntryBean"); AccountEntryType type = bean.getType(); AccountEntryGroup group = type.getGroup(); if (group.getId() == ConfigurationUtil.getDefault().getCostId()) { if (bd.signum() != -1) { log.info("validate - cost value is not negative (" + value + ")"); throw new ValidatorException(new FacesMessage("La cantidad debe ser negativa")); } } if (group.getId() == ConfigurationUtil.getDefault().getIncomeId()) { if (bd.signum() != 1) { log.info("validate - income value is not positive (" + value + ")"); throw new ValidatorException(new FacesMessage("La cantidad debe ser positiva")); } } } } }
/** * Client for accessing AWS CloudFormation. All service calls made using this client are blocking, * and will not return until the service call completes. * * <p><fullname>AWS CloudFormation</fullname> * * <p>AWS CloudFormation enables you to create and manage AWS infrastructure deployments predictably * and repeatedly. AWS CloudFormation helps you leverage AWS products such as Amazon EC2, EBS, * Amazon SNS, ELB, and Auto Scaling to build highly-reliable, highly scalable, cost effective * applications without worrying about creating and configuring the underlying AWS infrastructure. * * <p>With AWS CloudFormation, you declare all of your resources and dependencies in a template * file. The template defines a collection of resources as a single unit called a stack. AWS * CloudFormation creates and deletes all member resources of the stack together and manages all * dependencies between the resources for you. * * <p>For more information about this product, go to the <a * href="http://aws.amazon.com/cloudformation/">CloudFormation Product Page</a>. * * <p>Amazon CloudFormation makes use of other AWS products. If you need additional technical * information about a specific AWS product, you can find the product's technical documentation at * <a href="http://aws.amazon.com/documentation/" >http://aws.amazon.com/documentation/</a>. */ public class AmazonCloudFormationClient extends AmazonWebServiceClient implements AmazonCloudFormation { /** Provider for AWS credentials. */ private AWSCredentialsProvider awsCredentialsProvider; private static final Log log = LogFactory.getLog(AmazonCloudFormation.class); /** Default signing name for the service. */ private static final String DEFAULT_SIGNING_NAME = "cloudformation"; /** The region metadata service name for computing region endpoints. */ private static final String DEFAULT_ENDPOINT_PREFIX = "cloudformation"; /** List of exception unmarshallers for all AWS CloudFormation exceptions. */ protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers = new ArrayList<Unmarshaller<AmazonServiceException, Node>>(); /** * Constructs a new client to invoke service methods on AWS CloudFormation. A credentials provider * chain will be used that searches for credentials in this order: * * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY * <li>Java System Properties - aws.accessKeyId and aws.secretKey * <li>Instance profile credentials delivered through the Amazon EC2 metadata service * </ul> * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @see DefaultAWSCredentialsProviderChain */ public AmazonCloudFormationClient() { this( new DefaultAWSCredentialsProviderChain(), com.amazonaws.PredefinedClientConfigurations.defaultConfig()); } /** * Constructs a new client to invoke service methods on AWS CloudFormation. A credentials provider * chain will be used that searches for credentials in this order: * * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY * <li>Java System Properties - aws.accessKeyId and aws.secretKey * <li>Instance profile credentials delivered through the Amazon EC2 metadata service * </ul> * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param clientConfiguration The client configuration options controlling how this client * connects to AWS CloudFormation (ex: proxy settings, retry counts, etc.). 
* @see DefaultAWSCredentialsProviderChain */ public AmazonCloudFormationClient(ClientConfiguration clientConfiguration) { this(new DefaultAWSCredentialsProviderChain(), clientConfiguration); } /** * Constructs a new client to invoke service methods on AWS CloudFormation using the specified AWS * account credentials. * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param awsCredentials The AWS credentials (access key ID and secret key) to use when * authenticating with AWS services. */ public AmazonCloudFormationClient(AWSCredentials awsCredentials) { this(awsCredentials, com.amazonaws.PredefinedClientConfigurations.defaultConfig()); } /** * Constructs a new client to invoke service methods on AWS CloudFormation using the specified AWS * account credentials and client configuration options. * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param awsCredentials The AWS credentials (access key ID and secret key) to use when * authenticating with AWS services. * @param clientConfiguration The client configuration options controlling how this client * connects to AWS CloudFormation (ex: proxy settings, retry counts, etc.). */ public AmazonCloudFormationClient( AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) { super(clientConfiguration); this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials); init(); } /** * Constructs a new client to invoke service methods on AWS CloudFormation using the specified AWS * account credentials provider. * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. */ public AmazonCloudFormationClient(AWSCredentialsProvider awsCredentialsProvider) { this(awsCredentialsProvider, com.amazonaws.PredefinedClientConfigurations.defaultConfig()); } /** * Constructs a new client to invoke service methods on AWS CloudFormation using the specified AWS * account credentials provider and client configuration options. * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. * @param clientConfiguration The client configuration options controlling how this client * connects to AWS CloudFormation (ex: proxy settings, retry counts, etc.). */ public AmazonCloudFormationClient( AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) { this(awsCredentialsProvider, clientConfiguration, null); } /** * Constructs a new client to invoke service methods on AWS CloudFormation using the specified AWS * account credentials provider, client configuration options, and request metric collector. * * <p>All service calls made using this new client object are blocking, and will not return until * the service call completes. * * @param awsCredentialsProvider The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. * @param clientConfiguration The client configuration options controlling how this client * connects to AWS CloudFormation (ex: proxy settings, retry counts, etc.). 
* @param requestMetricCollector optional request metric collector */ public AmazonCloudFormationClient( AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricCollector) { super(clientConfiguration, requestMetricCollector); this.awsCredentialsProvider = awsCredentialsProvider; init(); } private void init() { exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller()); exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller()); exceptionUnmarshallers.add(new InsufficientCapabilitiesExceptionUnmarshaller()); exceptionUnmarshallers.add(new StandardErrorUnmarshaller()); setServiceNameIntern(DEFAULT_SIGNING_NAME); setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX); // calling this.setEndPoint(...) will also modify the signer accordingly this.setEndpoint("https://cloudformation.us-east-1.amazonaws.com"); HandlerChainFactory chainFactory = new HandlerChainFactory(); requestHandler2s.addAll( chainFactory.newRequestHandlerChain( "/com/amazonaws/services/cloudformation/request.handlers")); requestHandler2s.addAll( chainFactory.newRequestHandler2Chain( "/com/amazonaws/services/cloudformation/request.handler2s")); } /** * Cancels an update on the specified stack. If the call completes successfully, the stack rolls * back the update and reverts to the previous stack configuration. <note>You can cancel only * stacks that are in the UPDATE_IN_PROGRESS state.</note> * * @param cancelUpdateStackRequest The input for the <a>CancelUpdateStack</a> action. * @sample AmazonCloudFormation.CancelUpdateStack */ @Override public void cancelUpdateStack(CancelUpdateStackRequest cancelUpdateStackRequest) { ExecutionContext executionContext = createExecutionContext(cancelUpdateStackRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CancelUpdateStackRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CancelUpdateStackRequestMarshaller() .marshall(super.beforeMarshalling(cancelUpdateStackRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Creates a stack as specified in the template. After the call completes successfully, the stack * creation starts. You can check the status of the stack via the <a>DescribeStacks</a> API. * * @param createStackRequest The input for <a>CreateStack</a> action. * @return Result of the CreateStack operation returned by the service. * @throws LimitExceededException Quota for the resource has already been reached. * @throws AlreadyExistsException Resource with the name requested already exists. * @throws InsufficientCapabilitiesException The template contains resources with capabilities * that were not specified in the Capabilities parameter. 
* @sample AmazonCloudFormation.CreateStack */ @Override public CreateStackResult createStack(CreateStackRequest createStackRequest) { ExecutionContext executionContext = createExecutionContext(createStackRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateStackRequest> request = null; Response<CreateStackResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateStackRequestMarshaller() .marshall(super.beforeMarshalling(createStackRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateStackResult> responseHandler = new StaxResponseHandler<CreateStackResult>(new CreateStackResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted * stacks do not show up in the <a>DescribeStacks</a> API if the deletion has been completed * successfully. * * @param deleteStackRequest The input for <a>DeleteStack</a> action. * @sample AmazonCloudFormation.DeleteStack */ @Override public void deleteStack(DeleteStackRequest deleteStackRequest) { ExecutionContext executionContext = createExecutionContext(deleteStackRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteStackRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteStackRequestMarshaller() .marshall(super.beforeMarshalling(deleteStackRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that * you can create in your account. * * @param describeAccountLimitsRequest The input for the <a>DescribeAccountLimits</a> action. * @return Result of the DescribeAccountLimits operation returned by the service. * @sample AmazonCloudFormation.DescribeAccountLimits */ @Override public DescribeAccountLimitsResult describeAccountLimits( DescribeAccountLimitsRequest describeAccountLimitsRequest) { ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DescribeAccountLimitsRequest> request = null; Response<DescribeAccountLimitsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DescribeAccountLimitsRequestMarshaller() .marshall(super.beforeMarshalling(describeAccountLimitsRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DescribeAccountLimitsResult> responseHandler = new StaxResponseHandler<DescribeAccountLimitsResult>( new DescribeAccountLimitsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns all stack related events for a specified stack. For more information about a stack's * event history, go to <a href= * "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html" >Stacks</a> * in the AWS CloudFormation User Guide. <note>You can list events for stacks that have failed to * create or have been deleted by specifying the unique stack identifier (stack ID).</note> * * @param describeStackEventsRequest The input for <a>DescribeStackEvents</a> action. * @return Result of the DescribeStackEvents operation returned by the service. * @sample AmazonCloudFormation.DescribeStackEvents */ @Override public DescribeStackEventsResult describeStackEvents( DescribeStackEventsRequest describeStackEventsRequest) { ExecutionContext executionContext = createExecutionContext(describeStackEventsRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DescribeStackEventsRequest> request = null; Response<DescribeStackEventsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DescribeStackEventsRequestMarshaller() .marshall(super.beforeMarshalling(describeStackEventsRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DescribeStackEventsResult> responseHandler = new StaxResponseHandler<DescribeStackEventsResult>( new DescribeStackEventsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns a description of the specified resource in the specified stack. * * <p>For deleted stacks, DescribeStackResource returns resource information for up to 90 days * after the stack has been deleted. * * @param describeStackResourceRequest The input for <a>DescribeStackResource</a> action. * @return Result of the DescribeStackResource operation returned by the service. * @sample AmazonCloudFormation.DescribeStackResource */ @Override public DescribeStackResourceResult describeStackResource( DescribeStackResourceRequest describeStackResourceRequest) { ExecutionContext executionContext = createExecutionContext(describeStackResourceRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DescribeStackResourceRequest> request = null; Response<DescribeStackResourceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DescribeStackResourceRequestMarshaller() .marshall(super.beforeMarshalling(describeStackResourceRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DescribeStackResourceResult> responseHandler = new StaxResponseHandler<DescribeStackResourceResult>( new DescribeStackResourceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns AWS resource descriptions for running and deleted stacks. If <code>StackName</code> is * specified, all the associated resources that are part of the stack are returned. If <code> * PhysicalResourceId</code> is specified, the associated resources of the stack that the resource * belongs to are returned. <note>Only the first 100 resources will be returned. If your stack has * more resources than this, you should use <code>ListStackResources</code> instead.</note> * * <p>For deleted stacks, <code>DescribeStackResources</code> returns resource information for up * to 90 days after the stack has been deleted. * * <p>You must specify either <code>StackName</code> or <code>PhysicalResourceId</code>, but not * both. In addition, you can specify <code>LogicalResourceId</code> to filter the returned * result. For more information about resources, the <code>LogicalResourceId</code> and <code> * PhysicalResourceId</code>, go to the <a * href="http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide">AWS CloudFormation User * Guide</a>. <note>A <code>ValidationError</code> is returned if you specify both <code>StackName * </code> and <code>PhysicalResourceId</code> in the same request.</note> * * @param describeStackResourcesRequest The input for <a>DescribeStackResources</a> action. * @return Result of the DescribeStackResources operation returned by the service. * @sample AmazonCloudFormation.DescribeStackResources */ @Override public DescribeStackResourcesResult describeStackResources( DescribeStackResourcesRequest describeStackResourcesRequest) { ExecutionContext executionContext = createExecutionContext(describeStackResourcesRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DescribeStackResourcesRequest> request = null; Response<DescribeStackResourcesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DescribeStackResourcesRequestMarshaller() .marshall(super.beforeMarshalling(describeStackResourcesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DescribeStackResourcesResult> responseHandler = new StaxResponseHandler<DescribeStackResourcesResult>( new DescribeStackResourcesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns the description for the specified stack; if no stack name was specified, then it * returns the description for all the stacks created. * * @param describeStacksRequest The input for <a>DescribeStacks</a> action. * @return Result of the DescribeStacks operation returned by the service. 
* @sample AmazonCloudFormation.DescribeStacks */ @Override public DescribeStacksResult describeStacks(DescribeStacksRequest describeStacksRequest) { ExecutionContext executionContext = createExecutionContext(describeStacksRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DescribeStacksRequest> request = null; Response<DescribeStacksResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DescribeStacksRequestMarshaller() .marshall(super.beforeMarshalling(describeStacksRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DescribeStacksResult> responseHandler = new StaxResponseHandler<DescribeStacksResult>(new DescribeStacksResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public DescribeStacksResult describeStacks() { return describeStacks(new DescribeStacksRequest()); } /** * Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly * Calculator URL with a query string that describes the resources required to run the template. * * @param estimateTemplateCostRequest * @return Result of the EstimateTemplateCost operation returned by the service. * @sample AmazonCloudFormation.EstimateTemplateCost */ @Override public EstimateTemplateCostResult estimateTemplateCost( EstimateTemplateCostRequest estimateTemplateCostRequest) { ExecutionContext executionContext = createExecutionContext(estimateTemplateCostRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<EstimateTemplateCostRequest> request = null; Response<EstimateTemplateCostResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new EstimateTemplateCostRequestMarshaller() .marshall(super.beforeMarshalling(estimateTemplateCostRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<EstimateTemplateCostResult> responseHandler = new StaxResponseHandler<EstimateTemplateCostResult>( new EstimateTemplateCostResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public EstimateTemplateCostResult estimateTemplateCost() { return estimateTemplateCost(new EstimateTemplateCostRequest()); } /** * Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value * is returned. * * @param getStackPolicyRequest The input for the <a>GetStackPolicy</a> action. * @return Result of the GetStackPolicy operation returned by the service. 
* @sample AmazonCloudFormation.GetStackPolicy */ @Override public GetStackPolicyResult getStackPolicy(GetStackPolicyRequest getStackPolicyRequest) { ExecutionContext executionContext = createExecutionContext(getStackPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetStackPolicyRequest> request = null; Response<GetStackPolicyResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetStackPolicyRequestMarshaller() .marshall(super.beforeMarshalling(getStackPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetStackPolicyResult> responseHandler = new StaxResponseHandler<GetStackPolicyResult>(new GetStackPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns the template body for a specified stack. You can get the template for running or * deleted stacks. * * <p>For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has * been deleted. <note> If the template does not exist, a <code>ValidationError</code> is * returned. </note> * * @param getTemplateRequest The input for a <a>GetTemplate</a> action. * @return Result of the GetTemplate operation returned by the service. * @sample AmazonCloudFormation.GetTemplate */ @Override public GetTemplateResult getTemplate(GetTemplateRequest getTemplateRequest) { ExecutionContext executionContext = createExecutionContext(getTemplateRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTemplateRequest> request = null; Response<GetTemplateResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTemplateRequestMarshaller() .marshall(super.beforeMarshalling(getTemplateRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTemplateResult> responseHandler = new StaxResponseHandler<GetTemplateResult>(new GetTemplateResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns information about a new or existing template. The <code>GetTemplateSummary</code> * action is useful for viewing parameter information, such as default parameter values and * parameter types, before you create or update a stack. * * <p>You can use the <code>GetTemplateSummary</code> action when you submit a template, or you * can get template information for a running or deleted stack. * * <p>For deleted stacks, <code>GetTemplateSummary</code> returns the template information for up * to 90 days after the stack has been deleted. If the template does not exist, a <code> * ValidationError</code> is returned. * * @param getTemplateSummaryRequest The input for the <a>GetTemplateSummary</a> action. * @return Result of the GetTemplateSummary operation returned by the service. 
* @sample AmazonCloudFormation.GetTemplateSummary */ @Override public GetTemplateSummaryResult getTemplateSummary( GetTemplateSummaryRequest getTemplateSummaryRequest) { ExecutionContext executionContext = createExecutionContext(getTemplateSummaryRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTemplateSummaryRequest> request = null; Response<GetTemplateSummaryResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTemplateSummaryRequestMarshaller() .marshall(super.beforeMarshalling(getTemplateSummaryRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTemplateSummaryResult> responseHandler = new StaxResponseHandler<GetTemplateSummaryResult>( new GetTemplateSummaryResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetTemplateSummaryResult getTemplateSummary() { return getTemplateSummary(new GetTemplateSummaryRequest()); } /** * Returns descriptions of all resources of the specified stack. * * <p>For deleted stacks, ListStackResources returns resource information for up to 90 days after * the stack has been deleted. * * @param listStackResourcesRequest The input for the <a>ListStackResource</a> action. * @return Result of the ListStackResources operation returned by the service. * @sample AmazonCloudFormation.ListStackResources */ @Override public ListStackResourcesResult listStackResources( ListStackResourcesRequest listStackResourcesRequest) { ExecutionContext executionContext = createExecutionContext(listStackResourcesRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListStackResourcesRequest> request = null; Response<ListStackResourcesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListStackResourcesRequestMarshaller() .marshall(super.beforeMarshalling(listStackResourcesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListStackResourcesResult> responseHandler = new StaxResponseHandler<ListStackResourcesResult>( new ListStackResourcesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns the summary information for stacks whose status matches the specified * StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days * after the stack is deleted. If no StackStatusFilter is specified, summary information for all * stacks is returned (including existing stacks and stacks that have been deleted). * * @param listStacksRequest The input for <a>ListStacks</a> action. * @return Result of the ListStacks operation returned by the service. 
* @sample AmazonCloudFormation.ListStacks */ @Override public ListStacksResult listStacks(ListStacksRequest listStacksRequest) { ExecutionContext executionContext = createExecutionContext(listStacksRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListStacksRequest> request = null; Response<ListStacksResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListStacksRequestMarshaller().marshall(super.beforeMarshalling(listStacksRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListStacksResult> responseHandler = new StaxResponseHandler<ListStacksResult>(new ListStacksResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListStacksResult listStacks() { return listStacks(new ListStacksRequest()); } /** * Sets a stack policy for a specified stack. * * @param setStackPolicyRequest The input for the <a>SetStackPolicy</a> action. * @sample AmazonCloudFormation.SetStackPolicy */ @Override public void setStackPolicy(SetStackPolicyRequest setStackPolicyRequest) { ExecutionContext executionContext = createExecutionContext(setStackPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<SetStackPolicyRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new SetStackPolicyRequestMarshaller() .marshall(super.beforeMarshalling(setStackPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Sends a signal to the specified resource with a success or failure status. You can use the * SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation * doesn't proceed with a stack creation or update until resources receive the required number of * signals or the timeout period is exceeded. The SignalResource API is useful in cases where you * want to send signals from anywhere other than an Amazon EC2 instance. * * @param signalResourceRequest The input for the <a>SignalResource</a> action. * @sample AmazonCloudFormation.SignalResource */ @Override public void signalResource(SignalResourceRequest signalResourceRequest) { ExecutionContext executionContext = createExecutionContext(signalResourceRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<SignalResourceRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new SignalResourceRequestMarshaller() .marshall(super.beforeMarshalling(signalResourceRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Updates a stack as specified in the template. After the call completes successfully, the stack * update starts. You can check the status of the stack via the <a>DescribeStacks</a> action. * * <p>To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a> * action. * * <p>Tags that were associated with this stack during creation time will still be associated with * the stack after an <code>UpdateStack</code> operation. * * <p>For more information about creating an update template, updating a stack, and monitoring the * progress of the update, see <a href= * "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html" * >Updating a Stack</a>. * * @param updateStackRequest The input for <a>UpdateStack</a> action. * @return Result of the UpdateStack operation returned by the service. * @throws InsufficientCapabilitiesException The template contains resources with capabilities * that were not specified in the Capabilities parameter. * @sample AmazonCloudFormation.UpdateStack */ @Override public UpdateStackResult updateStack(UpdateStackRequest updateStackRequest) { ExecutionContext executionContext = createExecutionContext(updateStackRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateStackRequest> request = null; Response<UpdateStackResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateStackRequestMarshaller() .marshall(super.beforeMarshalling(updateStackRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateStackResult> responseHandler = new StaxResponseHandler<UpdateStackResult>(new UpdateStackResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Validates a specified template. * * @param validateTemplateRequest The input for <a>ValidateTemplate</a> action. * @return Result of the ValidateTemplate operation returned by the service. * @sample AmazonCloudFormation.ValidateTemplate */ @Override public ValidateTemplateResult validateTemplate(ValidateTemplateRequest validateTemplateRequest) { ExecutionContext executionContext = createExecutionContext(validateTemplateRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ValidateTemplateRequest> request = null; Response<ValidateTemplateResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ValidateTemplateRequestMarshaller() .marshall(super.beforeMarshalling(validateTemplateRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ValidateTemplateResult> responseHandler = new StaxResponseHandler<ValidateTemplateResult>( new ValidateTemplateResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns additional metadata for a previously executed successful request, typically used for * debugging issues where a service isn't acting as expected. This data isn't considered part of * the result data returned by an operation, so it's available through this separate diagnostic * interface. * * <p>Response metadata is only cached for a limited period of time, so if you need to access this * extra diagnostic information for an executed request, you should use this method to retrieve it * as soon as possible after executing the request. * * @param request The originally executed request * @return The response metadata for the specified request, or null if none is available. */ public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) { return client.getResponseMetadataForRequest(request); } private <X, Y extends AmazonWebServiceRequest> Response<X> invoke( Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) { request.setEndpoint(endpoint); request.setTimeOffset(timeOffset); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); AWSCredentials credentials; awsRequestMetrics.startEvent(Field.CredentialsRequestTime); try { credentials = awsCredentialsProvider.getCredentials(); } finally { awsRequestMetrics.endEvent(Field.CredentialsRequestTime); } AmazonWebServiceRequest originalRequest = request.getOriginalRequest(); if (originalRequest != null && originalRequest.getRequestCredentials() != null) { credentials = originalRequest.getRequestCredentials(); } executionContext.setCredentials(credentials); DefaultErrorResponseHandler errorResponseHandler = new DefaultErrorResponseHandler(exceptionUnmarshallers); return client.execute(request, responseHandler, errorResponseHandler, executionContext); } }
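A hedged usage sketch for the client above, assuming credentials can be resolved through the default provider chain described in the constructor Javadoc; it only lists stack names and statuses through a commons-logging Log.

Log log = LogFactory.getLog("CloudFormationExample");
AmazonCloudFormationClient cloudFormation = new AmazonCloudFormationClient();

// blocking call; returns descriptions of all stacks visible to the credentials in use
DescribeStacksResult stacks = cloudFormation.describeStacks();
for (Stack stack : stacks.getStacks()) {
    log.info(stack.getStackName() + " is in state " + stack.getStackStatus());
}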
/** * Shared functionality for hadoopStreaming formats. A custom reader is a RecordReader subclass * with the constructor below and is selected with the option bin/hadoopStreaming * -inputreader ... * * @see StreamXmlRecordReader */ public abstract class StreamBaseRecordReader implements RecordReader<Text, Text> { protected static final Log LOG = LogFactory.getLog(StreamBaseRecordReader.class.getName()); // custom JobConf properties for this class are prefixed with this namespace static final String CONF_NS = "stream.recordreader."; public StreamBaseRecordReader( FSDataInputStream in, FileSplit split, Reporter reporter, JobConf job, FileSystem fs) throws IOException { in_ = in; split_ = split; start_ = split_.getStart(); length_ = split_.getLength(); end_ = start_ + length_; splitName_ = split_.getPath().getName(); reporter_ = reporter; job_ = job; fs_ = fs; statusMaxRecordChars_ = job_.getInt(CONF_NS + "statuschars", 200); } /// RecordReader API /** Read a record. Implementation should call numRecStats at the end */ public abstract boolean next(Text key, Text value) throws IOException; /** This implementation is a no-op. */ public void validateInput(JobConf job) throws IOException {} /** Returns the current position in the input. */ public synchronized long getPos() throws IOException { return in_.getPos(); } /** Close this reader to future operations. */ public synchronized void close() throws IOException { in_.close(); } public float getProgress() throws IOException { if (end_ == start_) { return 1.0f; } else { return ((float) (in_.getPos() - start_)) / ((float) (end_ - start_)); } } public Text createKey() { return new Text(); } public Text createValue() { return new Text(); } /// StreamBaseRecordReader API /** * Implementation should seek forward in_ to the first byte of the next record. The initial byte * offset in the stream is arbitrary. */ public abstract void seekNextRecordBoundary() throws IOException; void numRecStats(byte[] record, int start, int len) throws IOException { numRec_++; if (numRec_ == nextStatusRec_) { String recordStr = new String(record, start, Math.min(len, statusMaxRecordChars_), "UTF-8"); nextStatusRec_ += 100; // *= 10; String status = getStatus(recordStr); LOG.info(status); reporter_.setStatus(status); } } long lastMem = 0; String getStatus(CharSequence record) { long pos = -1; try { pos = getPos(); } catch (IOException io) { } String recStr; if (record.length() > statusMaxRecordChars_) { recStr = record.subSequence(0, statusMaxRecordChars_) + "..."; } else { recStr = record.toString(); } String unqualSplit = split_.getPath().getName() + ":" + split_.getStart() + "+" + split_.getLength(); String status = "HSTR " + StreamUtil.HOST + " " + numRec_ + ". pos=" + pos + " " + unqualSplit + " Processing record=" + recStr; status += " " + splitName_; return status; } FSDataInputStream in_; FileSplit split_; long start_; long end_; long length_; String splitName_; Reporter reporter_; JobConf job_; FileSystem fs_; int numRec_ = 0; int nextStatusRec_ = 1; int statusMaxRecordChars_; }
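A hypothetical concrete reader built on the class above, assuming it lives in the same package (the in_, start_ and end_ fields are package-private) and that every record is a single line starting at the split boundary; StreamLineRecordReader is an invented name used only for this sketch.

public class StreamLineRecordReader extends StreamBaseRecordReader {

    public StreamLineRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
            JobConf job, FileSystem fs) throws IOException {
        super(in, split, reporter, job, fs);
    }

    public void seekNextRecordBoundary() throws IOException {
        // simplifying assumption: every split starts exactly at a record boundary
        in_.seek(start_);
    }

    public boolean next(Text key, Text value) throws IOException {
        if (in_.getPos() >= end_) return false;
        String line = in_.readLine(); // deprecated on DataInputStream, but adequate for a sketch
        if (line == null) return false;
        key.set(line);
        value.set("");
        byte[] bytes = line.getBytes("UTF-8");
        numRecStats(bytes, 0, bytes.length);
        return true;
    }
}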
public class TestMRSequenceFileAsBinaryOutputFormat extends TestCase { private static final Log LOG = LogFactory.getLog(TestMRSequenceFileAsBinaryOutputFormat.class.getName()); private static final int RECORDS = 10000; public void testBinary() throws IOException, InterruptedException { Configuration conf = new Configuration(); Job job = new Job(conf); Path outdir = new Path(System.getProperty("test.build.data", "/tmp"), "outseq"); Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); FileOutputFormat.setOutputPath(job, outdir); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class); SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true); SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK); BytesWritable bkey = new BytesWritable(); BytesWritable bval = new BytesWritable(); TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); OutputFormat<BytesWritable, BytesWritable> outputFormat = new SequenceFileAsBinaryOutputFormat(); OutputCommitter committer = outputFormat.getOutputCommitter(context); committer.setupJob(job); RecordWriter<BytesWritable, BytesWritable> writer = outputFormat.getRecordWriter(context); IntWritable iwritable = new IntWritable(); DoubleWritable dwritable = new DoubleWritable(); DataOutputBuffer outbuf = new DataOutputBuffer(); LOG.info("Creating data by SequenceFileAsBinaryOutputFormat"); try { for (int i = 0; i < RECORDS; ++i) { iwritable = new IntWritable(r.nextInt()); iwritable.write(outbuf); bkey.set(outbuf.getData(), 0, outbuf.getLength()); outbuf.reset(); dwritable = new DoubleWritable(r.nextDouble()); dwritable.write(outbuf); bval.set(outbuf.getData(), 0, outbuf.getLength()); outbuf.reset(); writer.write(bkey, bval); } } finally { writer.close(context); } committer.commitTask(context); committer.commitJob(job); InputFormat<IntWritable, DoubleWritable> iformat = new SequenceFileInputFormat<IntWritable, DoubleWritable>(); int count = 0; r.setSeed(seed); SequenceFileInputFormat.setInputPaths(job, outdir); LOG.info("Reading data by SequenceFileInputFormat"); for (InputSplit split : iformat.getSplits(job)) { RecordReader<IntWritable, DoubleWritable> reader = iformat.createRecordReader(split, context); MapContext<IntWritable, DoubleWritable, BytesWritable, BytesWritable> mcontext = new MapContextImpl<IntWritable, DoubleWritable, BytesWritable, BytesWritable>( job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split); reader.initialize(split, mcontext); try { int sourceInt; double sourceDouble; while (reader.nextKeyValue()) { sourceInt = r.nextInt(); sourceDouble = r.nextDouble(); iwritable = reader.getCurrentKey(); dwritable = reader.getCurrentValue(); assertEquals( "Keys don't match: " + "*" + iwritable.get() + ":" + sourceInt + "*", sourceInt, iwritable.get()); assertTrue( "Vals don't match: " + "*" + dwritable.get() + ":" + sourceDouble + "*", Double.compare(dwritable.get(), sourceDouble) == 0); ++count; } } finally { reader.close(); } } assertEquals("Some records not found", RECORDS, count); } public void testSequenceOutputClassDefaultsToMapRedOutputClass() throws IOException { Job job = new Job(); // Setting Random class to test getSequenceFileOutput{Key,Value}Class job.setOutputKeyClass(FloatWritable.class); job.setOutputValueClass(BooleanWritable.class); assertEquals( 
"SequenceFileOutputKeyClass should default to ouputKeyClass", FloatWritable.class, SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job)); assertEquals( "SequenceFileOutputValueClass should default to " + "ouputValueClass", BooleanWritable.class, SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job)); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class); assertEquals( "SequenceFileOutputKeyClass not updated", IntWritable.class, SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job)); assertEquals( "SequenceFileOutputValueClass not updated", DoubleWritable.class, SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job)); } public void testcheckOutputSpecsForbidRecordCompression() throws IOException { Job job = Job.getInstance(new Configuration(), "testcheckOutputSpecsForbidRecordCompression"); FileSystem fs = FileSystem.getLocal(job.getConfiguration()); Path outputdir = new Path(System.getProperty("test.build.data", "/tmp") + "/output"); fs.delete(outputdir, true); // Without outputpath, FileOutputFormat.checkoutputspecs will throw // InvalidJobConfException FileOutputFormat.setOutputPath(job, outputdir); // SequenceFileAsBinaryOutputFormat doesn't support record compression // It should throw an exception when checked by checkOutputSpecs SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true); SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK); try { new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job); } catch (Exception e) { fail( "Block compression should be allowed for " + "SequenceFileAsBinaryOutputFormat:Caught " + e.getClass().getName()); } SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.RECORD); try { new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job); fail("Record compression should not be allowed for " + "SequenceFileAsBinaryOutputFormat"); } catch (InvalidJobConfException ie) { // expected } catch (Exception e) { fail( "Expected " + InvalidJobConfException.class.getName() + "but caught " + e.getClass().getName()); } } }
/** * Generic session containing id of logged user. * @author Zdenda * */ public class GenericSession extends AuthenticatedWebSession { private static final long serialVersionUID = 1L; protected final Log logger = LogFactory.getLog(getClass()); /** * Id of logged member */ private Long memberId; @SpringBean(name = "membernetManager") private MembernetManager membernetManager; public GenericSession(Request request) { super(request); //so the autowiring works org.apache.wicket.injection.Injector.get().inject(this); } /** * For now, the username will be the memberId. * Method will try to parse the memberId and check if it exists. * If the parsing is ok and member with this id exists, then true is returned. */ @Override public boolean authenticate(String username, String password) { try { //try to parse it memberId = Long.parseLong(username); if (membernetManager == null) { logger.error("MembernetManager is null."); return false; } //check if exists if(membernetManager.exists(memberId)) { logger.debug("Successfully logged as member id="+memberId); return true; } else { logger.debug("Failed to log as member id="+memberId+". Member doesn't exist."); return false; } } catch (NumberFormatException e) { logger.warn("Error when parsing memberId: "+username); return false; } } @Override public void invalidate() { super.invalidate(); memberId = null; } @Override public Roles getRoles() { return null; } public long getLoggedMemberId() { return memberId; } }
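GenericSession only decides whether a username (a numeric member id) is valid; some Wicket component still has to trigger the login. A hedged sketch of that call site follows: the surrounding class is hypothetical, imports are omitted, and the exact AuthenticatedWebSession package depends on the Wicket version in use; the one grounded piece is that signIn(username, password) delegates to the authenticate(...) method above.

// Hypothetical call site: a sign-in handler that drives GenericSession.authenticate(...)
// through AuthenticatedWebSession.signIn(...). The class and its use are illustrative.
public class MemberSignInHandler {
  public boolean signIn(String username, String password) {
    // username is expected to be a parseable member id, e.g. "42"
    boolean ok = AuthenticatedWebSession.get().signIn(username, password);
    if (ok) {
      GenericSession session = (GenericSession) GenericSession.get();
      // the parsed id is now available for the rest of the request
      System.out.println("Logged in member " + session.getLoggedMemberId());
    }
    return ok;
  }
}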
/** * To generate automatically reports from list mode. * * <p>Uses JasperReports. * * @author Javier Paniza */ public class GenerateReportServlet extends HttpServlet { private static Log log = LogFactory.getLog(GenerateReportServlet.class); public static class TableModelDecorator implements TableModel { private TableModel original; private List metaProperties; private boolean withValidValues = false; private Locale locale; private boolean labelAsHeader = false; private HttpServletRequest request; private boolean format = false; // format or no the values. If format = true, all values to the report are String private Integer columnCountLimit; public TableModelDecorator( HttpServletRequest request, TableModel original, List metaProperties, Locale locale, boolean labelAsHeader, boolean format, Integer columnCountLimit) throws Exception { this.request = request; this.original = original; this.metaProperties = metaProperties; this.locale = locale; this.withValidValues = calculateWithValidValues(); this.labelAsHeader = labelAsHeader; this.format = format; this.columnCountLimit = columnCountLimit; } private boolean calculateWithValidValues() { Iterator it = metaProperties.iterator(); while (it.hasNext()) { MetaProperty m = (MetaProperty) it.next(); if (m.hasValidValues()) return true; } return false; } private MetaProperty getMetaProperty(int i) { return (MetaProperty) metaProperties.get(i); } public int getRowCount() { return original.getRowCount(); } public int getColumnCount() { return columnCountLimit == null ? original.getColumnCount() : columnCountLimit; } public String getColumnName(int c) { return labelAsHeader ? getMetaProperty(c).getLabel(locale) : Strings.change(getMetaProperty(c).getQualifiedName(), ".", "_"); } public Class getColumnClass(int c) { return original.getColumnClass(c); } public boolean isCellEditable(int row, int column) { return original.isCellEditable(row, column); } public Object getValueAt(int row, int column) { if (isFormat()) return getValueWithWebEditorsFormat(row, column); else return getValueWithoutWebEditorsFormat(row, column); } private Object getValueWithoutWebEditorsFormat(int row, int column) { Object r = original.getValueAt(row, column); if (r instanceof Boolean) { if (((Boolean) r).booleanValue()) return XavaResources.getString(locale, "yes"); return XavaResources.getString(locale, "no"); } if (withValidValues) { MetaProperty p = getMetaProperty(column); if (p.hasValidValues()) { return p.getValidValueLabel(locale, original.getValueAt(row, column)); } } if (r instanceof java.util.Date) { MetaProperty p = getMetaProperty(column); // In order to use the type declared by the developer // and not the one returned by JDBC or the JPA engine if (java.sql.Time.class.isAssignableFrom(p.getType())) { return DateFormat.getTimeInstance(DateFormat.SHORT, locale).format(r); } if (java.sql.Timestamp.class.isAssignableFrom(p.getType())) { DateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm:ss"); return dateFormat.format(r); } return DateFormat.getDateInstance(DateFormat.SHORT, locale).format(r); } if (r instanceof BigDecimal) { return formatBigDecimal(r, locale); } return r; } private Object getValueWithWebEditorsFormat(int row, int column) { Object r = original.getValueAt(row, column); MetaProperty metaProperty = getMetaProperty(column); String result = WebEditors.format(this.request, metaProperty, r, null, "", true); if (isHtml(result)) { // this avoids that the report shows html content result = WebEditors.format(this.request, metaProperty, r, null, "", 
false); } return result; } public void setValueAt(Object value, int row, int column) { original.setValueAt(value, row, column); } public void addTableModelListener(TableModelListener l) { original.addTableModelListener(l); } public void removeTableModelListener(TableModelListener l) { original.removeTableModelListener(l); } private boolean isHtml(String value) { return value.matches("<.*>"); } public boolean isFormat() { return format; } public void setFormat(boolean format) { this.format = format; } } protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { try { Locales.setCurrent(request); if (Users.getCurrent() == null) { // for a bug in websphere portal 5.1 with Domino LDAP Users.setCurrent((String) request.getSession().getAttribute("xava.user")); } request.getParameter("application"); // for a bug in websphere 5.1 request.getParameter("module"); // for a bug in websphere 5.1 Tab tab = (Tab) request.getSession().getAttribute("xava_reportTab"); int[] selectedRowsNumber = (int[]) request.getSession().getAttribute("xava_selectedRowsReportTab"); Map[] selectedKeys = (Map[]) request.getSession().getAttribute("xava_selectedKeysReportTab"); int[] selectedRows = getSelectedRows(selectedRowsNumber, selectedKeys, tab); request.getSession().removeAttribute("xava_selectedRowsReportTab"); Integer columnCountLimit = (Integer) request.getSession().getAttribute("xava_columnCountLimitReportTab"); request.getSession().removeAttribute("xava_columnCountLimitReportTab"); setDefaultSchema(request); String user = (String) request.getSession().getAttribute("xava_user"); request.getSession().removeAttribute("xava_user"); Users.setCurrent(user); String uri = request.getRequestURI(); if (uri.endsWith(".pdf")) { InputStream is; JRDataSource ds; Map parameters = new HashMap(); synchronized (tab) { tab.setRequest(request); parameters.put("Title", tab.getTitle()); parameters.put("Organization", getOrganization()); parameters.put("Date", getCurrentDate()); for (String totalProperty : tab.getTotalPropertiesNames()) { parameters.put(totalProperty + "__TOTAL__", getTotal(request, tab, totalProperty)); } TableModel tableModel = getTableModel(request, tab, selectedRows, false, true, null); tableModel.getValueAt(0, 0); if (tableModel.getRowCount() == 0) { generateNoRowsPage(response); return; } is = getReport(request, response, tab, tableModel, columnCountLimit); ds = new JRTableModelDataSource(tableModel); } JasperPrint jprint = JasperFillManager.fillReport(is, parameters, ds); response.setContentType("application/pdf"); response.setHeader( "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".pdf\""); JasperExportManager.exportReportToPdfStream(jprint, response.getOutputStream()); } else if (uri.endsWith(".csv")) { String csvEncoding = XavaPreferences.getInstance().getCSVEncoding(); if (!Is.emptyString(csvEncoding)) { response.setCharacterEncoding(csvEncoding); } response.setContentType("text/x-csv"); response.setHeader( "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".csv\""); synchronized (tab) { tab.setRequest(request); response .getWriter() .print( TableModels.toCSV( getTableModel(request, tab, selectedRows, true, false, columnCountLimit))); } } else { throw new ServletException( XavaResources.getString("report_type_not_supported", "", ".pdf .csv")); } } catch (Exception ex) { log.error(ex.getMessage(), ex); throw new ServletException(XavaResources.getString("report_error")); } finally { 
request.getSession().removeAttribute("xava_reportTab"); } } private void generateNoRowsPage(HttpServletResponse response) throws Exception { response.setContentType("text/html"); response.getWriter().println("<html><head><title>"); response.getWriter().println(XavaResources.getString("no_rows_report_message_title")); response .getWriter() .println( "</title></head><body style='font-family:Tahoma,Arial,sans-serif;color:black;background-color:white;'>"); response.getWriter().println("<h1 style='font-size:22px;'>"); response.getWriter().println(XavaResources.getString("no_rows_report_message_title")); response.getWriter().println("</h1>"); response.getWriter().println("<p style='font-size:16px;'>"); response.getWriter().println(XavaResources.getString("no_rows_report_message_detail")); response.getWriter().println("</p></body></html>"); } private String getCurrentDate() { return java.text.DateFormat.getDateInstance(DateFormat.MEDIUM, Locales.getCurrent()) .format(new java.util.Date()); } private String getFileName(Tab tab) { String now = new SimpleDateFormat("yyyyMMdd_HHmm").format(new Date()); return tab.getTitle() + " " + now; } private Object getTotal(HttpServletRequest request, Tab tab, String totalProperty) { Object total = tab.getTotal(totalProperty); return WebEditors.format( request, tab.getMetaProperty(totalProperty), total, new Messages(), null, true); } private void setDefaultSchema(HttpServletRequest request) { String hibernateDefaultSchemaTab = (String) request.getSession().getAttribute("xava_hibernateDefaultSchemaTab"); if (hibernateDefaultSchemaTab != null) { request.getSession().removeAttribute("xava_hibernateDefaultSchemaTab"); XHibernate.setDefaultSchema(hibernateDefaultSchemaTab); } String jpaDefaultSchemaTab = (String) request.getSession().getAttribute("xava_jpaDefaultSchemaTab"); if (jpaDefaultSchemaTab != null) { request.getSession().removeAttribute("xava_jpaDefaultSchemaTab"); XPersistence.setDefaultSchema(jpaDefaultSchemaTab); } } protected String getOrganization() throws MissingResourceException, XavaException { return ReportParametersProviderFactory.getInstance().getOrganization(); } private InputStream getReport( HttpServletRequest request, HttpServletResponse response, Tab tab, TableModel tableModel, Integer columnCountLimit) throws ServletException, IOException { StringBuffer suri = new StringBuffer(); suri.append("/xava/jasperReport"); suri.append("?language="); suri.append(Locales.getCurrent().getLanguage()); suri.append("&widths="); suri.append(Arrays.toString(getWidths(tableModel))); if (columnCountLimit != null) { suri.append("&columnCountLimit="); suri.append(columnCountLimit); } response.setCharacterEncoding(XSystem.getEncoding()); return Servlets.getURIAsStream(request, response, suri.toString()); } private int[] getWidths(TableModel tableModel) { int[] widths = new int[tableModel.getColumnCount()]; for (int r = 0; r < Math.min(tableModel.getRowCount(), 500); r++) { // 500 is not for performance, but for using only a sample of data with huge table for (int c = 0; c < tableModel.getColumnCount(); c++) { Object o = tableModel.getValueAt(r, c); if (o instanceof String) { String s = ((String) o).trim(); if (s.length() > widths[c]) widths[c] = s.length(); } } } return widths; } private TableModel getTableModel( HttpServletRequest request, Tab tab, int[] selectedRows, boolean labelAsHeader, boolean format, Integer columnCountLimit) throws Exception { TableModel data = null; if (selectedRows != null && selectedRows.length > 0) { data = new 
SelectedRowsXTableModel(tab.getTableModel(), selectedRows); } else { data = tab.getAllDataTableModel(); } return new TableModelDecorator( request, data, tab.getMetaProperties(), Locales.getCurrent(), labelAsHeader, format, columnCountLimit); } private static Object formatBigDecimal(Object number, Locale locale) { NumberFormat nf = NumberFormat.getNumberInstance(locale); nf.setMinimumFractionDigits(2); return nf.format(number); } private int[] getSelectedRows(int[] selectedRowsNumber, Map[] selectedRowsKeys, Tab tab) { if (selectedRowsKeys == null || selectedRowsKeys.length == 0) return new int[0]; // selectedRowsNumber is the fastest option, so we use it when possible else if (selectedRowsNumber.length == selectedRowsKeys.length) return selectedRowsNumber; else { // find the rows from the selected keys // This performs poorly, but it covers the case where the selected rows are // not loaded for the tab, which can happen if the user selects rows and // afterwards reorders the list. try { int[] s = new int[selectedRowsKeys.length]; List selectedKeys = Arrays.asList(selectedRowsKeys); int end = tab.getTableModel().getTotalSize(); int x = 0; for (int i = 0; i < end; i++) { Map key = (Map) tab.getTableModel().getObjectAt(i); if (selectedKeys.contains(key)) { s[x] = i; x++; } } return s; } catch (Exception ex) { log.warn(XavaResources.getString("fails_selected"), ex); throw new XavaException("fails_selected"); } } } }
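The servlet is driven entirely by session attributes ("xava_reportTab", "xava_selectedRowsReportTab", "xava_selectedKeysReportTab", "xava_columnCountLimitReportTab", "xava_user") plus the extension of the requested URI (.pdf or .csv). A hedged sketch of a caller that stages those attributes before forwarding is shown below; the class, the servlet URL and the "admin" user are hypothetical, and imports are omitted to match the surrounding listings.

// Hypothetical caller: stage the session attributes GenerateReportServlet reads,
// then redirect to a URL whose extension selects PDF or CSV output.
public class LaunchReportSketch {
  public void launchPdf(HttpServletRequest request, HttpServletResponse response, Tab tab)
      throws IOException {
    HttpSession session = request.getSession();
    session.setAttribute("xava_reportTab", tab);                     // the list to print
    session.setAttribute("xava_selectedRowsReportTab", new int[0]);  // empty selection = all rows
    session.setAttribute("xava_selectedKeysReportTab", new Map[0]);
    session.setAttribute("xava_user", "admin");                      // read once and removed by the servlet
    // ".pdf" fills a JasperReports design; ".csv" streams TableModels.toCSV(...).
    response.sendRedirect("generateReport.pdf");                     // hypothetical servlet mapping
  }
}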
/** * MDC Connector * * @author yjiang */ public abstract class MDCConnector extends IoHandlerAdapter { static final Log log = LogFactory.getLog(MDCConnector.class); /** the max size of a packet, 32KB */ static int MAX_SIZE = MDCServer.MAX_SIZE; protected Selector selector; protected IoConnector connector; protected static Configuration _conf; protected static boolean inited = false; /** Closes the selector and disposes the connector. */ public void close() { if (selector != null) { selector.wakeup(); try { selector.close(); } catch (IOException e1) { log.warn("close selector fails", e1); } finally { selector = null; } } if (connector != null) { connector.dispose(); connector = null; } } /** Instantiates a new MDC connector. */ protected MDCConnector() {} /** Initializes the shared configuration, the application commands and the RSA key pair. */ public static synchronized void init() { if (inited) { return; } _conf = Config.getConfig(); /** initialize app command */ Command.init(); /** initialize the RSA key, hardcoded to 2048 bits */ TConn.pub_key = SystemConfig.s("pub_key", null); if (TConn.pub_key == null) { Key k = RSA.generate(2048); TConn.pri_key = k.pri_key; TConn.pub_key = k.pub_key; /** set back in database */ SystemConfig.setConfig("pri_key", TConn.pri_key); SystemConfig.setConfig("pub_key", TConn.pub_key); } else { /** get from the database */ TConn.pri_key = SystemConfig.s("pri_key", null); } inited = true; } /** * Service: buffers incoming bytes and extracts complete packets. * * @param o the received buffer * @param session the session */ void service(IoBuffer o, IoSession session) { try { // System.out.println(o.remaining() + "/" + o.capacity()); session.setAttribute("last", System.currentTimeMillis()); SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf"); if (in == null) { in = SimpleIoBuffer.create(4096); session.setAttribute("buf", in); } byte[] data = new byte[o.remaining()]; o.get(data); in.append(data); // log.debug("recv: " + data.length + ", " + // session.getRemoteAddress()); while (in.length() > 5) { in.mark(); /** * Byte 1: head of the package<br> * bit 7-6: "01", indicator of MDC<br> * bit 5: encrypt indicator, "0": no; "1": encrypted<br> * bit 4: zip indicator, "0": no, "1": zipped<br> * bit 0-3: reserved<br> * Byte 2-5: length of data<br> * Byte[…]: data array<br> */ byte head = in.read(); /** test the head indicator; if not correct, close the session */ if ((head & 0xC0) != 0x40) { log.info("flag is not correct! 
flag:" + head + ",from: " + session.getRemoteAddress()); session.close(true); return; } int len = in.getInt(); if (len <= 0 || len > MAX_SIZE) { log.error( "mdcconnector.Wrong lendth: " + len + "/" + MAX_SIZE + " - " + session.getRemoteAddress()); session.close(true); break; } if (in.length() < len) { in.reset(); break; } else { // do it // log.info("stub.package.size: " + len); byte[] b = new byte[len]; in.read(b); if (TConn.DEBUG) { log.debug("recv: " + Bean.toString(b)); } /** test the zip flag */ if ((head & 0x10) > 0) { b = Zip.unzip(b); } final TConn d = (TConn) session.getAttribute("conn"); if (d != null) { /** test the encrypted flag */ if ((head & 0x20) > 0) { b = DES.decode(b, d.deskey); } final byte[] bb = b; /** test if the packet is for mdc or app */ new WorkerTask() { @Override public void onExecute() { d.process(bb); } }.schedule(0); session.setAttribute("last", System.currentTimeMillis()); } } } } catch (Throwable e) { log.error("closing stub: " + session.getRemoteAddress(), e); session.close(true); } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionCreated(org.apache * .mina.core.session.IoSession) */ public void sessionCreated(IoSession session) throws Exception { String remote = session.getRemoteAddress().toString(); log.info("stub created:" + remote); /** check the allow ip */ if (TConn.ALLOW_IP == null || "*".equals(TConn.ALLOW_IP) || remote.matches(TConn.ALLOW_IP)) { TConn d = new TConn(session); session.setAttribute("conn", d); } else { log.warn("deny the connection:" + remote + ", allow ip:" + TConn.ALLOW_IP); session.close(true); } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionClosed(org.apache * .mina.core.session.IoSession) */ public void sessionClosed(IoSession session) throws Exception { log.debug("closed stub: " + session.getRemoteAddress()); TConn d = (TConn) session.getAttribute("conn"); if (d != null) { d.close(); } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache. * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus) */ public void sessionIdle(IoSession session, IdleStatus status) throws Exception { if (IdleStatus.BOTH_IDLE.equals(status)) { Long l = (Long) session.getAttribute("last"); if (l != null && System.currentTimeMillis() - l > 60 * 1000) { session.close(true); } } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#messageReceived(org.apache * .mina.core.session.IoSession, java.lang.Object) */ public void messageReceived(IoSession session, Object message) throws Exception { // System.out.println(message); if (message instanceof IoBuffer) { service((IoBuffer) message, session); } } private static MDCConnector tcpconnector; private static MDCConnector udpconnector; /** * @param host * @param port * @return TConn */ public static synchronized TConn connectByTcp(String host, int port) { return connectByTcp(host, port, X.AMINUTE); } /** * Connect by tcp. 
* * @param host the host * @param port the port * @return the t conn */ public static synchronized TConn connectByTcp(String host, int port, long timeout) { TimeStamp t = TimeStamp.create(); try { if (tcpconnector == null) { tcpconnector = new TDCConnector(); } tcpconnector.connector.setConnectTimeoutMillis(timeout); ConnectFuture connFuture = tcpconnector.connector.connect(new InetSocketAddress(host, port)); connFuture.awaitUninterruptibly(timeout); IoSession session = connFuture.getSession(); TConn c = new TConn(session); session.setAttribute("conn", c); return c; } catch (Exception e) { log.error( "error, [" + host + ":" + port + "], cost: " + t.past() + "ms, timeout=" + timeout, e); } return null; } /** * Connect by udp. * * @param host the host * @param port the port * @return the t conn */ public static synchronized TConn connectByUdp(String host, int port) { try { if (udpconnector == null) { udpconnector = new UDCConnector(); } ConnectFuture connFuture = udpconnector.connector.connect(new InetSocketAddress(host, port)); connFuture.awaitUninterruptibly(); IoSession session = connFuture.getSession(); TConn c = new TConn(session); session.setAttribute("conn", c); return c; } catch (Exception e) { log.error("[" + host + ":" + port + "]", e); } return null; } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#exceptionCaught(org.apache * .mina.core.session.IoSession, java.lang.Throwable) */ @Override public void exceptionCaught(IoSession session, Throwable cause) throws Exception { log.error(cause.getMessage(), cause); } }
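The framing that service() parses is documented inline: a head byte whose top two bits must be 01 (the MDC indicator), with bit 5 marking encryption and bit 4 marking compression, followed by a four-byte length and the payload. The sketch below builds such a frame for illustration; the helper class is hypothetical, it assumes the big-endian layout that MINA's IoBuffer uses by default, and it applies no real encryption or compression.

// Hedged sketch of the frame layout parsed in service(): one head byte,
// a 4-byte big-endian length, then the payload.
import java.nio.ByteBuffer;

public final class MdcFrames {
  public static byte[] frame(byte[] payload, boolean encrypted, boolean zipped) {
    byte head = 0x40;                      // 0100_0000: MDC indicator in bits 7-6
    if (encrypted) head |= 0x20;           // bit 5: encrypted
    if (zipped)    head |= 0x10;           // bit 4: zipped
    ByteBuffer buf = ByteBuffer.allocate(1 + 4 + payload.length);
    buf.put(head);
    buf.putInt(payload.length);            // bytes 2-5: length of data
    buf.put(payload);                      // the data array
    return buf.array();
  }

  public static void main(String[] args) {
    byte[] frame = frame("hello".getBytes(), false, false);
    // (frame[0] & 0xC0) == 0x40 is exactly the check service() performs on the head byte
    System.out.println("head=0x" + Integer.toHexString(frame[0] & 0xFF) + ", length field=5");
  }
}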
/** @author Javier Paniza */ public class MetaFinder implements Serializable { private static Log log = LogFactory.getLog(MetaFinder.class); private static Map argumentsJBoss11ToEJBQL; private static Map argumentsToHQL; private static Map tokensToChangeDollarsAndNL; private String name; private String arguments; private boolean collection; private String condition; private String order; private MetaModel metaModel; public String getArguments() { arguments = Strings.change(arguments, "String", "java.lang.String"); arguments = Strings.change(arguments, "java.lang.java.lang.String", "java.lang.String"); return arguments; } public Collection getMetaPropertiesArguments() throws XavaException { StringTokenizer st = new StringTokenizer(getArguments(), ","); Collection result = new ArrayList(); while (st.hasMoreTokens()) { String argument = st.nextToken(); StringTokenizer argumentSt = new StringTokenizer(argument); String type = argumentSt.nextToken().trim(); String name = argumentSt.nextToken().trim(); MetaProperty p = new MetaProperty(); p.setName(name); p.setTypeName(type); result.add(p); } return result; } public boolean isCollection() { return collection; } public String getCondition() { return condition; } public String getName() { return name; } public void setArguments(String arguments) { this.arguments = arguments; } public void setCollection(boolean collection) { this.collection = collection; } public void setCondition(String condition) { this.condition = condition; } public void setName(String name) { this.name = name; } public boolean isSupportedForEJB2() throws XavaException { return !hasSome3LevelProperty(getCondition()) && !hasSome3LevelProperty(getOrder()); } private boolean hasSome3LevelProperty(String sentence) throws XavaException { if (sentence == null) return false; int i = sentence.indexOf("${"); int f = 0; while (i >= 0) { f = sentence.indexOf("}", i + 2); if (f < 0) break; String property = sentence.substring(i + 2, f); StringTokenizer st = new StringTokenizer(property, "."); if (st.countTokens() > 3) { log.warn(XavaResources.getString("property_3_level_in_ejb2_finder", property, getName())); return true; } if (st.countTokens() == 3) { if (!getMetaModel().getMetaProperty(property).isKey()) { log.warn(XavaResources.getString("property_3_level_in_ejb2_finder", property, getName())); return true; } } i = sentence.indexOf("${", i + 1); } return false; } public String getEJBQLCondition() throws XavaException { StringBuffer sb = new StringBuffer("SELECT OBJECT(o) FROM "); sb.append(getMetaModel().getName()); sb.append(" o"); if (!Is.emptyString(this.condition)) { sb.append(" WHERE "); String attributesCondition = getMetaModel().getMapping().changePropertiesByCMPAttributes(this.condition); sb.append(Strings.change(attributesCondition, getArgumentsJBoss11ToEJBQL())); } if (!Is.emptyString(this.order)) { sb.append(" ORDER BY "); sb.append(getMetaModel().getMapping().changePropertiesByCMPAttributes(this.order)); } return sb.toString(); } public String getHQLCondition() throws XavaException { return getHQLCondition(true); } private String getHQLCondition(boolean order) throws XavaException { StringBuffer sb = new StringBuffer("from "); sb.append(getMetaModel().getName()); sb.append(" as o"); if (!Is.emptyString(this.condition)) { sb.append(" where "); String condition = transformAggregateProperties(getCondition()); condition = Strings.change(condition, getArgumentsToHQL()); sb.append(Strings.change(condition, getTokensToChangeDollarsAndNL())); } if (order && 
!Is.emptyString(this.order)) { sb.append(" order by "); sb.append( Strings.change( transformAggregateProperties(this.order), getTokensToChangeDollarsAndNL())); } return sb.toString(); } /** * Transforms ${address.street} in ${address_street} if address if an aggregate of container * model. * * @param condition * @return */ private String transformAggregateProperties(String condition) { int i = condition.indexOf("${"); if (i < 0) return condition; StringBuffer result = new StringBuffer(condition.substring(0, i + 2)); while (i >= 0) { int f = condition.indexOf("}", i); String property = condition.substring(i + 2, f); String transformedProperty = transformAgregateProperty(property); result.append(transformedProperty); i = condition.indexOf("${", f); if (i >= 0) result.append(condition.substring(f, i)); else result.append(condition.substring(f)); } return result.toString(); } private String transformAgregateProperty(String property) { StringBuffer result = new StringBuffer(); StringTokenizer st = new StringTokenizer(property, "."); String member = ""; while (st.hasMoreTokens()) { String token = st.nextToken(); result.append(token); if (!st.hasMoreTokens()) break; member = member + token; try { MetaReference ref = getMetaModel().getMetaReference(member); if (ref.isAggregate()) result.append('_'); else result.append('.'); } catch (XavaException ex) { result.append('.'); } member = member + "."; } return result.toString(); } public String getHQLCountSentence() throws XavaException { StringBuffer sb = new StringBuffer("select count(*) "); sb.append(getHQLCondition(false)); return sb.toString(); } public MetaModel getMetaModel() { return metaModel; } public void setMetaModel(MetaModel metaModel) { this.metaModel = metaModel; } public String getOrder() { return order; } public void setOrder(String order) { this.order = order; } private static Map getArgumentsJBoss11ToEJBQL() { if (argumentsJBoss11ToEJBQL == null) { argumentsJBoss11ToEJBQL = new HashMap(); for (int i = 0; i < 30; i++) { argumentsJBoss11ToEJBQL.put("{" + i + "}", "?" + (i + 1)); } } return argumentsJBoss11ToEJBQL; } private static Map getArgumentsToHQL() { if (argumentsToHQL == null) { argumentsToHQL = new HashMap(); for (int i = 0; i < 30; i++) { argumentsToHQL.put("{" + i + "}", ":arg" + i); } } return argumentsToHQL; } static Map getTokensToChangeDollarsAndNL() { if (tokensToChangeDollarsAndNL == null) { tokensToChangeDollarsAndNL = new HashMap(); tokensToChangeDollarsAndNL.put("${", "o."); tokensToChangeDollarsAndNL.put("}", ""); tokensToChangeDollarsAndNL.put("\n", ""); } return tokensToChangeDollarsAndNL; } public boolean equals(Object other) { if (!(other instanceof MetaFinder)) return false; return toString().equals(other.toString()); } public int hashCode() { return toString().hashCode(); } public String toString() { return "Finder: " + getMetaModel().getName() + "." + getName(); } }
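The finder-to-HQL translation is token substitution: positional arguments {i} become named parameters :argi, ${property} becomes o.property, and newlines are stripped (see getArgumentsToHQL and getTokensToChangeDollarsAndNL above). The standalone sketch below reproduces that mapping with plain JDK String.replace standing in for OpenXava's Strings.change; the entity name MyEntity and the sample condition are made up for illustration.

// Illustrative sketch of how a finder condition becomes an HQL where clause.
public class FinderConditionSketch {
  public static void main(String[] args) {
    String condition = "${customer.name} = {0} and ${amount} > {1}";
    // {i} -> :argi  (the mapping built in getArgumentsToHQL)
    for (int i = 0; i < 30; i++) {
      condition = condition.replace("{" + i + "}", ":arg" + i);
    }
    // ${ -> "o." and } -> ""  (the mapping built in getTokensToChangeDollarsAndNL)
    condition = condition.replace("${", "o.").replace("}", "").replace("\n", "");
    System.out.println("from MyEntity as o where " + condition);
    // prints: from MyEntity as o where o.customer.name = :arg0 and o.amount > :arg1
  }
}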
public class IdentitySchema { private static final String IDENTITY_TABLE_PREFIX = "JBPM_ID_"; Configuration configuration = null; Properties properties = null; Dialect dialect = null; Mapping mapping = null; String[] createSql = null; String[] dropSql = null; String[] cleanSql = null; ConnectionProvider connectionProvider = null; Connection connection = null; Statement statement = null; public IdentitySchema(Configuration configuration) { this.configuration = configuration; this.properties = configuration.getProperties(); this.dialect = Dialect.getDialect(properties); try { // get the mapping field via reflection :-( Field mappingField = Configuration.class.getDeclaredField("mapping"); mappingField.setAccessible(true); this.mapping = (Mapping) mappingField.get(configuration); } catch (Exception e) { throw new RuntimeException("couldn't get the hibernate mapping", e); } } // scripts lazy initializations ///////////////////////////////////////////// public String[] getCreateSql() { if (createSql == null) { createSql = configuration.generateSchemaCreationScript(dialect); } return createSql; } public String[] getDropSql() { if (dropSql == null) { dropSql = configuration.generateDropSchemaScript(dialect); } return dropSql; } public String[] getCleanSql() { if (cleanSql == null) { // loop over all foreign key constraints List dropForeignKeysSql = new ArrayList(); List createForeignKeysSql = new ArrayList(); Iterator iter = configuration.getTableMappings(); while (iter.hasNext()) { Table table = (Table) iter.next(); if (table.isPhysicalTable()) { Iterator subIter = table.getForeignKeyIterator(); while (subIter.hasNext()) { ForeignKey fk = (ForeignKey) subIter.next(); if (fk.isPhysicalConstraint()) { // collect the drop key constraint dropForeignKeysSql.add( fk.sqlDropString( dialect, properties.getProperty(Environment.DEFAULT_CATALOG), properties.getProperty(Environment.DEFAULT_SCHEMA))); createForeignKeysSql.add( fk.sqlCreateString( dialect, mapping, properties.getProperty(Environment.DEFAULT_CATALOG), properties.getProperty(Environment.DEFAULT_SCHEMA))); } } } } List deleteSql = new ArrayList(); iter = configuration.getTableMappings(); while (iter.hasNext()) { Table table = (Table) iter.next(); deleteSql.add("delete from " + table.getName()); } List cleanSqlList = new ArrayList(); cleanSqlList.addAll(dropForeignKeysSql); cleanSqlList.addAll(deleteSql); cleanSqlList.addAll(createForeignKeysSql); cleanSql = (String[]) cleanSqlList.toArray(new String[cleanSqlList.size()]); } return cleanSql; } // runtime table detection ////////////////////////////////////////////////// public boolean hasIdentityTables() { return (getIdentityTables().size() > 0); } public List getIdentityTables() { // collect the names of the jbpm identity tables (those starting with the prefix) List jbpmTableNames = new ArrayList(); try { createConnection(); ResultSet resultSet = connection.getMetaData().getTables("", "", null, null); while (resultSet.next()) { String tableName = resultSet.getString("TABLE_NAME"); if ((tableName != null) && (tableName.length() > IDENTITY_TABLE_PREFIX.length()) && (IDENTITY_TABLE_PREFIX.equalsIgnoreCase(tableName.substring(0, IDENTITY_TABLE_PREFIX.length())))) { jbpmTableNames.add(tableName); } } } catch (SQLException e) { throw new RuntimeException("couldn't get the jbpm table names", e); } finally { closeConnection(); } return jbpmTableNames; } // script execution methods ///////////////////////////////////////////////// public void dropSchema() { execute(getDropSql()); } public void createSchema() { execute(getCreateSql()); } public void cleanSchema() { execute(getCleanSql()); } public void 
saveSqlScripts(String dir, String prefix) { try { new File(dir).mkdirs(); saveSqlScript(dir + "/" + prefix + ".drop.sql", getDropSql()); saveSqlScript(dir + "/" + prefix + ".create.sql", getCreateSql()); saveSqlScript(dir + "/" + prefix + ".clean.sql", getCleanSql()); new SchemaExport(configuration) .setDelimiter(getSqlDelimiter()) .setOutputFile(dir + "/" + prefix + ".drop.create.sql") .create(true, false); } catch (Exception e) { throw new RuntimeException("couldn't generate scripts", e); } } // main ///////////////////////////////////////////////////////////////////// public static void main(String[] args) { try { if ((args != null) && (args.length == 1) && ("create".equalsIgnoreCase(args[0]))) { new IdentitySchema(IdentitySessionFactory.createConfiguration()).createSchema(); } else if ((args != null) && (args.length == 1) && ("drop".equalsIgnoreCase(args[0]))) { new IdentitySchema(IdentitySessionFactory.createConfiguration()).dropSchema(); } else if ((args != null) && (args.length == 1) && ("clean".equalsIgnoreCase(args[0]))) { new IdentitySchema(IdentitySessionFactory.createConfiguration()).cleanSchema(); } else if ((args != null) && (args.length == 3) && ("scripts".equalsIgnoreCase(args[0]))) { new IdentitySchema(IdentitySessionFactory.createConfiguration()) .saveSqlScripts(args[1], args[2]); } else { System.err.println("syntax: JbpmSchema create"); System.err.println("syntax: JbpmSchema drop"); System.err.println("syntax: JbpmSchema clean"); System.err.println("syntax: JbpmSchema scripts <dir> <prefix>"); } } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } private void saveSqlScript(String fileName, String[] sql) throws FileNotFoundException { FileOutputStream fileOutputStream = new FileOutputStream(fileName); PrintStream printStream = new PrintStream(fileOutputStream); for (int i = 0; i < sql.length; i++) { printStream.println(sql[i] + getSqlDelimiter()); } } // sql script execution ///////////////////////////////////////////////////// public void execute(String[] sqls) { String sql = null; String showSqlText = properties.getProperty("hibernate.show_sql"); boolean showSql = ("true".equalsIgnoreCase(showSqlText)); try { createConnection(); statement = connection.createStatement(); for (int i = 0; i < sqls.length; i++) { sql = sqls[i]; String delimitedSql = sql + getSqlDelimiter(); if (showSql) log.debug(delimitedSql); statement.executeUpdate(delimitedSql); } } catch (SQLException e) { e.printStackTrace(); throw new RuntimeException("couldn't execute sql '" + sql + "'", e); } finally { closeConnection(); } } private void closeConnection() { try { if (statement != null) statement.close(); if (connection != null) { JDBCExceptionReporter.logWarnings(connection.getWarnings()); connection.clearWarnings(); connectionProvider.closeConnection(connection); connectionProvider.close(); } } catch (Exception e) { System.err.println("Could not close connection"); e.printStackTrace(); } } private void createConnection() throws SQLException { connectionProvider = ConnectionProviderFactory.newConnectionProvider(properties); connection = connectionProvider.getConnection(); if (!connection.getAutoCommit()) { connection.commit(); connection.setAutoCommit(true); } } public Properties getProperties() { return properties; } // sql delimiter //////////////////////////////////////////////////////////// private static String sqlDelimiter = null; private synchronized String getSqlDelimiter() { if (sqlDelimiter == null) { sqlDelimiter = properties.getProperty("jbpm.sql.delimiter", 
";"); } return sqlDelimiter; } // logger /////////////////////////////////////////////////////////////////// private static final Log log = LogFactory.getLog(IdentitySchema.class); }
/** * Data access class for the region master. ID RCSfile="$RCSfile: MasterRyouikiInfoDao.java,v $" Revision="$Revision: 1.1 $" * Date="$Date: 2007/06/28 02:06:50 $" */ public class MasterRyouikiInfoDao { // --------------------------------------------------------------------- // Static data // --------------------------------------------------------------------- /** Logger */ protected static final Log log = LogFactory.getLog(MasterRyouikiInfoDao.class); // --------------------------------------------------------------------- // Instance data // --------------------------------------------------------------------- /** Information about the executing user */ private UserInfo userInfo = null; // --------------------------------------------------------------------- // Constructors // --------------------------------------------------------------------- /** * Constructor. * * @param userInfo information about the executing user */ public MasterRyouikiInfoDao(UserInfo userInfo) { this.userInfo = userInfo; } // --------------------------------------------------------------------- // Public Methods // --------------------------------------------------------------------- /** * Gets the list of regions (for combo boxes). * * @param connection connection * @return list of regions * @throws ApplicationException */ public static List selectRyouikiKubunInfoList(Connection connection) throws ApplicationException, NoDataFoundException { // ----------------------- // Build the SQL statement // ----------------------- String select = "SELECT" + " A.RYOIKI_NO" + ",A.RYOIKI_RYAKU" + " FROM MASTER_RYOIKI A" + " ORDER BY RYOIKI_NO"; StringBuffer query = new StringBuffer(select); if (log.isDebugEnabled()) { log.debug("query:" + query); } // ----------------------- // Fetch the list // ----------------------- try { return SelectUtil.select(connection, query.toString()); } catch (DataAccessException e) { throw new ApplicationException("A database error occurred while searching region information.", new ErrorInfo("errors.4004"), e); } catch (NoDataFoundException e) { throw new NoDataFoundException("The region master contains no data.", e); } } /** * Returns one region master record as a Map. The primary key value is passed as the argument. * * @param connection connection * @param ryouikiNo region number * @return record as a Map * @throws NoDataFoundException * @throws DataAccessException */ public static Map selectRecord(Connection connection, String ryouikiNo) throws NoDataFoundException, DataAccessException { // ----------------------- // Build the SQL statement // ----------------------- String select = "SELECT" + " A.RYOIKI_NO" + ",A.RYOIKI_RYAKU" + ",A.KOMOKU_NO" // 2006/06/26 苗 start of change + ",A.SETTEI_KIKAN" // configured period + ",A.SETTEI_KIKAN_KAISHI" // configured period (start fiscal year) + ",A.SETTEI_KIKAN_SHURYO" // configured period (end fiscal year) // 2006/06/26 苗 end of change + ",A.BIKO" + " FROM MASTER_RYOIKI A" + " WHERE RYOIKI_NO = ? "; if (log.isDebugEnabled()) { log.debug("query:" + select); } // ----------------------- // Fetch the record // ----------------------- List result = SelectUtil.select(connection, select, new String[] {ryouikiNo}); if (result.isEmpty()) { throw new NoDataFoundException("The requested record does not exist. Region No=" + ryouikiNo); } return (Map) result.get(0); } /** * Returns one region master record as a Map. The primary key values are passed as arguments. * * @param connection connection * @param pkInfo primary key of the region * @return record as a Map * @throws NoDataFoundException * @throws DataAccessException */ public static Map selectRecord(Connection connection, RyouikiInfoPk pkInfo) throws NoDataFoundException, DataAccessException { return selectRecord(connection, pkInfo, "0"); } /** * Returns one region master record as a Map. The primary key values are passed as arguments. * * @param connection connection * @param pkInfo primary key of the region * @param ryoikiKbn region kind ("1" = planned research, "2" = open-call research) * @return record as a Map * @throws NoDataFoundException * @throws DataAccessException */ public static Map selectRecord(Connection connection, RyouikiInfoPk pkInfo, String ryoikiKbn) throws NoDataFoundException, DataAccessException { // ----------------------- // Build the SQL statement // ----------------------- String select = "SELECT" + " A.RYOIKI_NO" + ",A.RYOIKI_RYAKU" + ",A.KOMOKU_NO" // 2006/07/04 苗 start of change + ",A.SETTEI_KIKAN" // configured period + ",A.SETTEI_KIKAN_KAISHI" // configured period (start fiscal year) + ",A.SETTEI_KIKAN_SHURYO" // configured period (end fiscal year) // 2006/07/04 苗 end of change + " FROM MASTER_RYOIKI A" + " WHERE RYOIKI_NO = ? " + " AND KOMOKU_NO = ? "; // planned research if ("1".equals(ryoikiKbn)) { select = select + " AND KEIKAKU_FLG = '1'"; } // open-call research else if ("2".equals(ryoikiKbn)) { select = select + " AND KOUBO_FLG = '1'"; } if (log.isDebugEnabled()) { log.debug("query:" + select); } // ----------------------- // Fetch the record // ----------------------- List result = SelectUtil.select( connection, select, new String[] {pkInfo.getRyoikiNo(), pkInfo.getKomokuNo()}); if (result.isEmpty()) { throw new NoDataFoundException("The requested record does not exist."); } return (Map) result.get(0); } /** * Registers region information. * * @param connection connection * @param addInfo region information to register * @throws DataAccessException if an exception occurs during registration. * @throws DuplicateKeyException if data with the same key already exists. */ public void insertRyoikiInfo(Connection connection, RyouikiInfo addInfo) throws DataAccessException, DuplicateKeyException { // duplicate check try { selectRecord(connection, addInfo); // NG throw new DuplicateKeyException("'" + addInfo + "' is already registered."); } catch (NoDataFoundException e) { // OK } String query = "INSERT INTO MASTER_RYOIKI " + "(" + " RYOIKI_NO" // region number + ",RYOIKI_RYAKU" // region short name + ",KOMOKU_NO" // research item number + ",KOUBO_FLG" // open-call flag + ",KEIKAKU_FLG" // planned-research flag // add start liuyi 2006/06/30 + ",ZENNENDO_OUBO_FLG" // previous-fiscal-year application flag + ",SETTEI_KIKAN_KAISHI" // configured period (start fiscal year) + ",SETTEI_KIKAN_SHURYO" // configured period (end fiscal year) + ",SETTEI_KIKAN" // configured period // add end liuyi 2006/06/30 + ",BIKO" // remarks + ")" + "VALUES " + "(?,?,?,?,?,?,?,?,?,?)"; PreparedStatement preparedStatement = null; try { // insert preparedStatement = connection.prepareStatement(query); int i = 1; DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getRyoikiNo()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getRyoikiName()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getKomokuNo()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getKobou()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getKeikaku()); // add start liuyi 2006/06/30 DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getZennendoOuboFlg()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getSettelKikanKaishi()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getSettelKikanShuryo()); DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getSettelKikan()); // add end liuyi 2006/06/30 DatabaseUtil.setParameter(preparedStatement, i++, addInfo.getBiko()); DatabaseUtil.executeUpdate(preparedStatement); } catch (SQLException ex) { log.error("An exception occurred while registering region master information.", ex); throw new DataAccessException("An exception occurred while registering region master information.", ex); } finally { DatabaseUtil.closeResource(null, preparedStatement); } } /** * Method for building a code list.<br> * Gets the list of region numbers and region names, sorted by region number. * * @param connection connection * @param kubun region kind ("1" = planned research, otherwise open-call research) * @return list of regions * @throws ApplicationException */ public static List selectRyoikiInfoList(Connection connection, String kubun) throws ApplicationException { // ----------------------- // Build the SQL statement // ----------------------- String select = "SELECT" + " RYOIKI_NO," // region number + " RYOIKI_RYAKU" // region name + " FROM MASTER_RYOIKI"; if ("1".equals(kubun)) { select = select + " WHERE KEIKAKU_FLG = '1'"; } else { select = select + " WHERE KOUBO_FLG = '1'"; } select = select + " GROUP BY RYOIKI_NO, RYOIKI_RYAKU" + " ORDER BY RYOIKI_NO"; if (log.isDebugEnabled()) { log.debug("query:" + select); } // ----------------------- // Fetch the list // ----------------------- try { return SelectUtil.select(connection, select); } catch (DataAccessException e) { throw new ApplicationException("A database error occurred while searching region information.", new ErrorInfo("errors.4004"), e); } catch (NoDataFoundException e) { throw new SystemException("The region master contains no data.", e); } } // 2006/06/26 苗 start of addition /** * Gets the number of matching region numbers. * * @param connection connection * @param ryoikoNo region number * @return count as a String * @throws ApplicationException * @throws DataAccessException */ public String selectRyoikiNoCount(Connection connection, String ryoikoNo) throws ApplicationException, DataAccessException { String strCount = null; ResultSet recordSet = null; PreparedStatement preparedStatement = null; // ----------------------- // Build the SQL statement // ----------------------- StringBuffer select = new StringBuffer(); select.append("SELECT COUNT(RYOIKI_NO) "); select.append(ISystemServise.STR_COUNT); select.append(" FROM"); select.append(" (SELECT MR.RYOIKI_NO FROM MASTER_RYOIKI MR WHERE MR.ZENNENDO_OUBO_FLG = '1' "); select.append(" AND MR.RYOIKI_NO = '"); select.append(EscapeUtil.toSqlString(ryoikoNo)); select.append("')"); if (log.isDebugEnabled()) { log.debug("query:" + select.toString()); } try { preparedStatement = connection.prepareStatement(select.toString()); recordSet = preparedStatement.executeQuery(); if (recordSet.next()) { strCount = recordSet.getString(ISystemServise.STR_COUNT); } else { throw new NoDataFoundException("No matching data was found in the region master table."); } } catch (SQLException ex) { throw new DataAccessException("An exception occurred while searching the region master table.", ex); } catch (NoDataFoundException ex) { throw new NoDataFoundException("The specified region number does not exist.", ex); } return strCount; } // 2006/06/26 苗 end of addition // 2006/07/24 苗 start of addition /** * Method for building a code list (new regions).<br> * Gets the list of region numbers and region names, sorted by region number. * * @param connection connection * @return List * @throws ApplicationException */ public static List selectRyoikiSinnkiInfoList(Connection connection) throws ApplicationException { // ----------------------- // Build the SQL statement // ----------------------- StringBuffer select = new StringBuffer(); select.append("SELECT DISTINCT"); select.append(" RYOIKI_NO,"); // region number select.append(" RYOIKI_RYAKU,"); // region name select.append(" SETTEI_KIKAN"); // configured period select.append(" FROM MASTER_RYOIKI"); select.append(" WHERE ZENNENDO_OUBO_FLG = '1'"); select.append(" ORDER BY RYOIKI_NO"); if (log.isDebugEnabled()) { log.debug("query:" + select); } // ----------------------- // Fetch the list // ----------------------- try { return SelectUtil.select(connection, select.toString()); } catch (DataAccessException e) { throw new ApplicationException("A database error occurred while searching region information.", new ErrorInfo("errors.4004"), e); } catch (NoDataFoundException e) { throw new SystemException("The region master contains no data.", e); } } // 2006/07/24 苗 end of addition }
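A hedged usage sketch for the DAO above: it assumes a Connection obtained from an injected DataSource (connection handling is outside the class), that SelectUtil returns rows as Maps keyed by column name, and that "001" is a plausible region number; all three are illustrative assumptions rather than facts from the source.

// Hypothetical caller: list the regions for a combo box, then load one record by region number.
public class MasterRyouikiInfoDaoSketch {
  public static void printRegions(javax.sql.DataSource dataSource) throws Exception {
    try (java.sql.Connection connection = dataSource.getConnection()) {
      // RYOIKI_NO / RYOIKI_RYAKU pairs, ordered by RYOIKI_NO
      java.util.List regions = MasterRyouikiInfoDao.selectRyouikiKubunInfoList(connection);
      for (Object row : regions) {
        java.util.Map record = (java.util.Map) row;
        System.out.println(record.get("RYOIKI_NO") + " : " + record.get("RYOIKI_RYAKU"));
      }
      // Single record by primary key; throws NoDataFoundException when absent.
      java.util.Map one = MasterRyouikiInfoDao.selectRecord(connection, "001");
      System.out.println(one);
    }
  }
}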
/** * Distribute application-specific large, read-only files efficiently. * * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce framework to cache * files (text, archives, jars etc.) needed by applications. * * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached via the {@link * org.apache.hadoop.mapred.JobConf}. The <code>DistributedCache</code> assumes that the files * specified via hdfs:// urls are already present on the {@link FileSystem} at the path specified by * the url. * * <p>The framework will copy the necessary files on to the slave node before any tasks for the job * are executed on that node. Its efficiency stems from the fact that the files are only copied once * per job and the ability to cache archives which are un-archived on the slaves. * * <p><code>DistributedCache</code> can be used to distribute simple, read-only data/text files * and/or more complex types such as archives, jars etc. Archives (zip, tar and tgz/tar.gz files) * are un-archived at the slave nodes. Jars may be optionally added to the classpath of the tasks, a * rudimentary software distribution mechanism. Files have execution permissions. Optionally users * can also direct it to symlink the distributed cache file(s) into the working directory of the * task. * * <p><code>DistributedCache</code> tracks modification timestamps of the cache files. Clearly the * cache files should not be modified by the application or externally while the job is executing. * * <p>Here is an illustrative example on how to use the <code>DistributedCache</code>: * * <p> * * <blockquote> * * <pre> * // Setting up the cache for the application * * 1. Copy the requisite files to the <code>FileSystem</code>: * * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz * * 2. Setup the application's <code>JobConf</code>: * * JobConf job = new JobConf(); * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), * job); * DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job); * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job); * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job); * * 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper} * or {@link org.apache.hadoop.mapred.Reducer}: * * public static class MapClass extends MapReduceBase * implements Mapper<K, V, K, V> { * * private Path[] localArchives; * private Path[] localFiles; * * public void configure(JobConf job) { * // Get the cached archives/files * localArchives = DistributedCache.getLocalCacheArchives(job); * localFiles = DistributedCache.getLocalCacheFiles(job); * } * * public void map(K key, V value, * OutputCollector<K, V> output, Reporter reporter) * throws IOException { * // Use data from the cached archives/files here * // ... * // ... 
* output.collect(k, v); * } * } * * </pre> * * </blockquote> * * @see org.apache.hadoop.mapred.JobConf * @see org.apache.hadoop.mapred.JobClient */ public class DistributedCache { // cacheID to cacheStatus mapping private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>(); private static TreeMap<Path, Long> baseDirSize = new TreeMap<Path, Long>(); private static TreeMap<Path, Integer> baseDirNumberSubDir = new TreeMap<Path, Integer>(); // default total cache size private static final long DEFAULT_CACHE_SIZE = 10737418240L; private static final long DEFAULT_CACHE_SUBDIR_LIMIT = 10000; private static final Log LOG = LogFactory.getLog(DistributedCache.class); private static Random random = new Random(); /** * Get the locally cached file or archive; it could either be previously cached (and valid) or * copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as new * URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema or hostname:port is * provided the file is assumed to be in the filesystem being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or * .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred * automatically and the directory where the archive is unzipped/unjarred/untarred is returned * as the Path. In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be * cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks for the * locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, the path to * the file where the file is copied locally * @throws IOException */ public static Path getLocalCache( URI cache, Configuration conf, Path baseDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, MRAsyncDiskService asyncDiskService) throws IOException { return getLocalCache( cache, conf, baseDir, fileStatus, isArchive, confFileStamp, fileStatus.getLen(), currentWorkDir, true, asyncDiskService, new LocalDirAllocator("mapred.local.dir")); } public static Path getLocalCacheFromTimestamps( URI cache, Configuration conf, Path subDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, long fileLength, Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService, LocalDirAllocator lDirAllocator) throws IOException { return getLocalCache( cache, conf, subDir, fileStatus, isArchive, confFileStamp, fileLength, currentWorkDir, honorSymLinkConf, asyncDiskService, lDirAllocator); } public static Path getLocalCacheFromURI( URI cache, Configuration conf, Path subDir, boolean isArchive, long fileLength, Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService, LocalDirAllocator lDirAllocator) throws IOException { return getLocalCache( cache, conf, subDir, null, isArchive, 0, fileLength, currentWorkDir, honorSymLinkConf, asyncDiskService, lDirAllocator); } /** Added for back compatibility. 
*/ public static Path getLocalCache( URI cache, Configuration conf, Path subdir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService, LocalDirAllocator lDirAllocator) throws IOException { return getLocalCache( cache, conf, subdir, fileStatus, isArchive, confFileStamp, fileStatus.getLen(), currentWorkDir, honorSymLinkConf, asyncDiskService, lDirAllocator); } /** * Get the locally cached file or archive; it could either be previously cached (and valid) or * copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as new * URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema or hostname:port is * provided the file is assumed to be in the filesystem being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param subDir The sub cache Dir where you want to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or * .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred * automatically and the directory where the archive is unzipped/unjarred/untarred is returned * as the Path. In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be * cached hasn't changed since the job started * @param fileLength this is the length of the cache file * @param currentWorkDir this is the directory where you would want to create symlinks for the * locally cached files/archives * @param honorSymLinkConf if this is false, then the symlinks are not created even if conf says * so (this is required for an optimization in task launches * @param lDirAllocator LocalDirAllocator of the tracker * @return the path to directory where the archives are unjarred in case of archives, the path to * the file where the file is copied locally * @throws IOException */ private static Path getLocalCache( URI cache, Configuration conf, Path subDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, long fileLength, Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService, LocalDirAllocator lDirAllocator) throws IOException { String key = getKey(cache, conf, confFileStamp); CacheStatus lcacheStatus; Path localizedPath; synchronized (cachedArchives) { lcacheStatus = cachedArchives.get(key); if (lcacheStatus == null) { // was never localized Path uniqueParentDir = new Path(subDir, String.valueOf(random.nextLong())); String cachePath = new Path(uniqueParentDir, makeRelative(cache, conf)).toString(); Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, fileLength, conf); lcacheStatus = new CacheStatus( new Path(localPath.toString().replace(cachePath, "")), localPath, uniqueParentDir); cachedArchives.put(key, lcacheStatus); } lcacheStatus.refcount++; } boolean initSuccessful = false; try { synchronized (lcacheStatus) { if (!lcacheStatus.isInited()) { localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, isArchive); lcacheStatus.initComplete(); } else { if (fileStatus != null) { localizedPath = checkCacheStatusValidity( conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive); } else { // if fileStatus is null, then the md5 must be correct // so there is no need to check for cache validity localizedPath = lcacheStatus.localizedLoadPath; } } 
createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, honorSymLinkConf); } // try deleting stuff if you can long size = 0; int numberSubDir = 0; synchronized (lcacheStatus) { synchronized (baseDirSize) { Long get = baseDirSize.get(lcacheStatus.getBaseDir()); if (get != null) { size = get.longValue(); } else { LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir()); } } synchronized (baseDirNumberSubDir) { Integer get = baseDirNumberSubDir.get(lcacheStatus.getBaseDir()); if (get != null) { numberSubDir = get.intValue(); } else { LOG.warn("Cannot find subdirectories limit of baseDir: " + lcacheStatus.getBaseDir()); } } } // setting the cache size to a default of 10GB long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE); long allowedNumberSubDir = conf.getLong("local.cache.numbersubdir", DEFAULT_CACHE_SUBDIR_LIMIT); if (allowedSize < size || allowedNumberSubDir < numberSubDir) { // try some cache deletions LOG.debug( "Start deleting released cache because" + " [size, allowedSize, numberSubDir, allowedNumberSubDir] =" + " [" + size + ", " + allowedSize + ", " + numberSubDir + ", " + allowedNumberSubDir + "]"); deleteCache(conf, asyncDiskService); } initSuccessful = true; return localizedPath; } finally { if (!initSuccessful) { synchronized (cachedArchives) { lcacheStatus.refcount--; } } } } /** * Get the locally cached file or archive; it could either be previously cached (and valid) or * copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as new * URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema or hostname:port is * provided the file is assumed to be in the filesystem being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param isArchive if the cache is an archive or a file. In case it is an archive with a .zip or * .jar or .tar or .tgz or .tar.gz extension it will be unzipped/unjarred/untarred * automatically and the directory where the archive is unzipped/unjarred/untarred is returned * as the Path. In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the file to be * cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks for the * locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, the path to * the file where the file is copied locally * @throws IOException */ public static Path getLocalCache( URI cache, Configuration conf, Path baseDir, boolean isArchive, long confFileStamp, Path currentWorkDir, MRAsyncDiskService asyncDiskService) throws IOException { return getLocalCache( cache, conf, baseDir, null, isArchive, confFileStamp, currentWorkDir, asyncDiskService); } /** * This is the opposite of getlocalcache. When you are done with using the cache, you need to * release the cache * * @param cache The cache URI to be released * @param conf configuration which contains the filesystem the cache is contained in. 
* @throws IOException */ public static void releaseCache(URI cache, Configuration conf, long timeStamp) throws IOException { String cacheId = getKey(cache, conf, timeStamp); synchronized (cachedArchives) { CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus == null) { LOG.warn( "Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!"); return; } lcacheStatus.refcount--; } } /** Runnable which removes the cache directories from the disk */ private static class CacheFileCleanTask implements Runnable { private MRAsyncDiskService asyncDiskService; private LocalFileSystem fs; private List<CacheStatus> toBeDeletedCache; public CacheFileCleanTask( MRAsyncDiskService asyncDiskService, LocalFileSystem fs, List<CacheStatus> toBeDeletedCache) { this.asyncDiskService = asyncDiskService; this.fs = fs; this.toBeDeletedCache = toBeDeletedCache; } @Override public void run() { for (CacheStatus lcacheStatus : toBeDeletedCache) { synchronized (lcacheStatus) { Path fullUniqueParentDir = new Path(lcacheStatus.localizedBaseDir, lcacheStatus.uniqueParentDir); try { LOG.info("Deleting local cached path: " + fullUniqueParentDir.toString()); deleteLocalPath(asyncDiskService, fs, fullUniqueParentDir); // decrement the size of the cache from baseDirSize deleteCacheInfoUpdate(lcacheStatus); LOG.info("Removed cache " + lcacheStatus.localizedLoadPath); } catch (IOException e) { LOG.warn("Error when deleting " + fullUniqueParentDir, e); } } } } } // To delete the caches which have a refcount of zero private static void deleteCache(Configuration conf, MRAsyncDiskService asyncDiskService) throws IOException { List<CacheStatus> deleteSet = new LinkedList<CacheStatus>(); // try deleting cache Status with refcount of zero synchronized (cachedArchives) { for (Iterator<String> it = cachedArchives.keySet().iterator(); it.hasNext(); ) { String cacheId = (String) it.next(); CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus.refcount == 0) { // delete this cache entry from the global list // and mark the localized file for deletion deleteSet.add(lcacheStatus); it.remove(); } } } // do the deletion asynchronously, after releasing the global lock Thread cacheFileCleaner = new Thread(new CacheFileCleanTask(asyncDiskService, FileSystem.getLocal(conf), deleteSet)); cacheFileCleaner.start(); } /** * Delete a local path with asyncDiskService if available, or otherwise synchronously with local * file system. */ private static void deleteLocalPath( MRAsyncDiskService asyncDiskService, LocalFileSystem fs, Path path) throws IOException { boolean deleted = false; if (asyncDiskService != null) { // Try to delete using asyncDiskService String localPathToDelete = path.toUri().getPath(); deleted = asyncDiskService.moveAndDeleteAbsolutePath(localPathToDelete); if (!deleted) { LOG.warn( "Cannot find DistributedCache path " + localPathToDelete + " on any of the asyncDiskService volumes!"); } } if (!deleted) { // If no asyncDiskService, we will delete the files synchronously fs.delete(path, true); } LOG.info("Deleted path " + path); } /* * Returns the relative path of the dir this cache will be localized in * relative path that this cache will be localized in. 
For * hdfs://hostname:port/absolute_path -- the relative path is * hostname/absolute path -- if it is just /absolute_path -- then the * relative path is hostname of DFS this mapred cluster is running * on/absolute_path */ public static String makeRelative(URI cache, Configuration conf) throws IOException { String host = cache.getHost(); if (host == null) { host = cache.getScheme(); } if (host == null) { URI defaultUri = FileSystem.get(conf).getUri(); host = defaultUri.getHost(); if (host == null) { host = defaultUri.getScheme(); } } String path = host + cache.getPath(); path = path.replace(":/", "/"); // remove windows device colon return path; } static String getKey(URI cache, Configuration conf, long timeStamp) throws IOException { return makeRelative(cache, conf) + String.valueOf(timeStamp); } private static Path checkCacheStatusValidity( Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive) throws IOException { FileSystem fs = FileSystem.get(cache, conf); // Has to be if (!ifExistsAndFresh(conf, fs, cache, confFileStamp, cacheStatus, fileStatus)) { throw new IOException( "Stale cache file: " + cacheStatus.localizedLoadPath + " for cache-file: " + cache); } LOG.info( String.format( "Using existing cache of %s->%s", cache.toString(), cacheStatus.localizedLoadPath)); return cacheStatus.localizedLoadPath; } private static void createSymlink( Configuration conf, URI cache, CacheStatus cacheStatus, boolean isArchive, Path currentWorkDir, boolean honorSymLinkConf) throws IOException { boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf); if (cache.getFragment() == null) { doSymlink = false; } String link = currentWorkDir.toString() + Path.SEPARATOR + cache.getFragment(); File flink = new File(link); if (doSymlink) { if (!flink.exists()) { FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link); } } } // the method which actually copies the caches locally and unjars/unzips them // and does chmod for the files private static Path localizeCache( Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, boolean isArchive) throws IOException { FileSystem fs = getFileSystem(cache, conf); FileSystem localFs = FileSystem.getLocal(conf); Path parchive = null; if (isArchive) { parchive = new Path( cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName())); } else { parchive = cacheStatus.localizedLoadPath; } if (!localFs.mkdirs(parchive.getParent())) { throw new IOException( "Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString()); } String cacheId = cache.getPath(); fs.copyToLocalFile(new Path(cacheId), parchive); if (isArchive) { String tmpArchive = parchive.toString().toLowerCase(); File srcFile = new File(parchive.toString()); File destDir = new File(parchive.getParent().toString()); if (tmpArchive.endsWith(".jar")) { RunJar.unJar(srcFile, destDir); } else if (tmpArchive.endsWith(".zip")) { FileUtil.unZip(srcFile, destDir); } else if (isTarFile(tmpArchive)) { FileUtil.unTar(srcFile, destDir); } // else will not do anyhting // and copy the file into the dir as it is } long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString())); cacheStatus.size = cacheSize; addCacheInfoUpdate(cacheStatus); // do chmod here try { // Setting recursive permission to grant everyone read and execute Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir); LOG.info("Doing chmod on localdir :" + localDir); 
FileUtil.chmod(localDir.toString(), "ugo+rx", true); } catch (InterruptedException e) { LOG.warn("Exception in chmod" + e.toString()); } // update cacheStatus to reflect the newly cached file cacheStatus.mtime = getTimestamp(conf, cache); return cacheStatus.localizedLoadPath; } private static boolean isTarFile(String filename) { return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") || filename.endsWith(".tar")); } // Checks if the cache has already been localized and is fresh private static boolean ifExistsAndFresh( Configuration conf, FileSystem fs, URI cache, long confFileStamp, CacheStatus lcacheStatus, FileStatus fileStatus) throws IOException { // check for existence of the cache long dfsFileStamp; if (fileStatus != null) { dfsFileStamp = fileStatus.getModificationTime(); } else { dfsFileStamp = getTimestamp(conf, cache); } // ensure that the file on hdfs hasn't been modified since the job started if (dfsFileStamp != confFileStamp) { LOG.fatal("File: " + cache + " has changed on HDFS since job started"); throw new IOException("File: " + cache + " has changed on HDFS since job started"); } if (dfsFileStamp != lcacheStatus.mtime) { // needs refreshing return false; } return true; } /** * Returns mtime of a given cache file on hdfs. * * @param conf configuration * @param cache cache file * @return mtime of a given cache file on hdfs * @throws IOException */ public static long getTimestamp(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); Path filePath = new Path(cache.getPath()); return fileSystem.getFileStatus(filePath).getModificationTime(); } /** * Returns the status of a given cache file on hdfs. * * @param conf configuration * @param cache cache file * @return FileStatus object of the file * @throws IOException */ public static FileStatus getFileStatus(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); Path filePath = new Path(cache.getPath()); return fileSystem.getFileStatus(filePath); } /** * This method create symlinks for all files in a given dir in another directory * * @param conf the configuration * @param jobCacheDir the target directory for creating symlinks * @param workDir the directory in which the symlinks are created * @throws IOException */ public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir) throws IOException { if ((jobCacheDir == null || !jobCacheDir.isDirectory()) || workDir == null || (!workDir.isDirectory())) { return; } boolean createSymlink = getSymlink(conf); if (createSymlink) { File[] list = jobCacheDir.listFiles(); for (int i = 0; i < list.length; i++) { FileUtil.symLink( list[i].getAbsolutePath(), new File(workDir, list[i].getName()).toString()); } } } private static String getFileSysName(URI url) { String fsname = url.getScheme(); if ("hdfs".equals(fsname)) { String host = url.getHost(); int port = url.getPort(); return (port == (-1)) ? 
host : (host + ":" + port); } else { return null; } } private static FileSystem getFileSystem(URI cache, Configuration conf) throws IOException { String fileSysName = getFileSysName(cache); if (fileSysName != null) return FileSystem.getNamed(fileSysName, conf); else return FileSystem.get(conf); } /** * Set the configuration with the given set of archives * * @param archives The list of archives that need to be localized * @param conf Configuration which will be changed */ public static void setCacheArchives(URI[] archives, Configuration conf) { String sarchives = StringUtils.uriToString(archives); conf.set("mapred.cache.archives", sarchives); } /** * Set the configuration with the given set of files * * @param files The list of files that need to be localized * @param conf Configuration which will be changed */ public static void setCacheFiles(URI[] files, Configuration conf) { String sfiles = StringUtils.uriToString(files); conf.set("mapred.cache.files", sfiles); } /** * Get cache archives set in the Configuration * * @param conf The configuration which contains the archives * @return A URI array of the caches set in the Configuration * @throws IOException */ public static URI[] getCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives")); } /** * Get cache archives set in the Configuration * * @param conf The configuration which contains the archives * @return A URI array of the caches set in the Configuration * @throws IOException */ public static URI[] getSharedCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.shared.archives")); } /** * Get cache files set in the Configuration * * @param conf The configuration which contains the files * @return A URI array of the files set in the Configuration * @throws IOException */ public static URI[] getCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.files")); } /** * Get cache files set in the Configuration * * @param conf The configuration which contains the files * @return A URI array of the files set in the Configuration * @throws IOException */ public static URI[] getSharedCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.shared.files")); } /** * Return the path array of the localized caches * * @param conf Configuration that contains the localized archives * @return A path array of localized caches * @throws IOException */ public static Path[] getLocalCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.localArchives")); } /** * Return the path array of the localized caches * * @param conf Configuration that contains the localized archives * @return A path array of localized caches * @throws IOException */ public static Path[] getLocalSharedCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.shared.localArchives")); } /** * Return the path array of the localized files * * @param conf Configuration that contains the localized files * @return A path array of localized files * @throws IOException */ public static Path[] getLocalCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles")); } /** * Return the path array of the localized files * * @param conf Configuration that contains the 
localized files * @return A path array of localized files * @throws IOException */ public static Path[] getLocalSharedCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.shared.localFiles")); } /** * Get the timestamps of the archives * * @param conf The configuration which stores the timestamps * @return a string array of timestamps */ public static String[] getArchiveTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.archives.timestamps"); } /** * Get the timestamps of the files * * @param conf The configuration which stores the timestamps * @return a string array of timestamps */ public static String[] getFileTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.files.timestamps"); } public static String[] getSharedArchiveLength(Configuration conf) { return conf.getStrings("mapred.cache.shared.archives.length"); } public static String[] getSharedFileLength(Configuration conf) { return conf.getStrings("mapred.cache.shared.files.length"); } /** * This sets the timestamps of the archives to be localized * * @param conf Configuration which stores the timestamps * @param timestamps comma-separated list of timestamps of archives. The order should be the same * as the order in which the archives are added. */ public static void setArchiveTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.archives.timestamps", timestamps); } public static void setSharedArchiveLength(Configuration conf, String length) { conf.set("mapred.cache.shared.archives.length", length); } /** * This sets the timestamps of the files to be localized * * @param conf Configuration which stores the timestamps * @param timestamps comma-separated list of timestamps of files. The order should be the same as * the order in which the files are added.
*/ public static void setFileTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.files.timestamps", timestamps); } public static void setSharedFileLength(Configuration conf, String length) { conf.set("mapred.cache.shared.files.length", length); } /** * Set the conf to contain the location for localized archives * * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local archives */ public static void setLocalArchives(Configuration conf, String str) { conf.set("mapred.cache.localArchives", str); } /** * Set the conf to contain the location for localized archives * * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local archives */ public static void setLocalSharedArchives(Configuration conf, String str) { conf.set("mapred.cache.shared.localArchives", str); } /** * Set the conf to contain the location for localized files * * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local files */ public static void setLocalFiles(Configuration conf, String str) { conf.set("mapred.cache.localFiles", str); } /** * Set the conf to contain the location for localized files * * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local files */ public static void setLocalSharedFiles(Configuration conf, String str) { conf.set("mapred.cache.shared.localFiles", str); } /** * Add a archives to be localized to the conf * * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheArchive(URI uri, Configuration conf) { String archives = conf.get("mapred.cache.archives"); conf.set( "mapred.cache.archives", archives == null ? uri.toString() : archives + "," + uri.toString()); } /** * Add a archives to be localized to the conf * * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addSharedCacheArchive(URI uri, Configuration conf) { String archives = conf.get("mapred.cache.shared.archives"); conf.set( "mapred.cache.shared.archives", archives == null ? uri.toString() : archives + "," + uri.toString()); } /** * Add a file to be localized to the conf * * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheFile(URI uri, Configuration conf) { String files = conf.get("mapred.cache.files"); conf.set("mapred.cache.files", files == null ? uri.toString() : files + "," + uri.toString()); } /** * Add a file to be localized to the conf * * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addSharedCacheFile(URI uri, Configuration conf) { String files = conf.get("mapred.cache.shared.files"); conf.set( "mapred.cache.shared.files", files == null ? uri.toString() : files + "," + uri.toString()); } /** * Add an file path to the current set of classpath entries It adds the file to cache as well. * * @param file Path of the file to be added * @param conf Configuration that contains the classpath setting */ public static void addFileToClassPath(Path file, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.files"); conf.set( "mapred.job.classpath.files", classpath == null ? 
file.toString() : classpath + System.getProperty("path.separator") + file.toString()); URI uri = file.makeQualified(file.getFileSystem(conf)).toUri(); addCacheFile(uri, conf); } /** * Get the file entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getFileClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.files"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System.getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } private static URI addArchiveToClassPathHelper(Path archive, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.archives"); // the scheme/authority use ':' as separator. put the unqualified path in classpath String archivePath = archive.toUri().getPath(); conf.set( "mapred.job.classpath.archives", classpath == null ? archivePath : classpath + System.getProperty("path.separator") + archivePath); return archive.makeQualified(archive.getFileSystem(conf)).toUri(); } /** * Add an archive path to the current set of classpath entries. It adds the archive to cache as * well. * * @param archive Path of the archive to be added * @param conf Configuration that contains the classpath setting */ public static void addArchiveToClassPath(Path archive, Configuration conf) throws IOException { URI uri = addArchiveToClassPathHelper(archive, conf); addCacheArchive(uri, conf); } /** * Add an archive path to the current set of classpath entries. It adds the archive to cache as * well. * * @param archive Path of the archive to be added * @param conf Configuration that contains the classpath setting */ public static void addSharedArchiveToClassPath(Path archive, Configuration conf) throws IOException { URI uri = addArchiveToClassPathHelper(archive, conf); addSharedCacheArchive(uri, conf); } /** * Get the archive entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getArchiveClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.archives"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System.getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * This method allows you to create symlinks in the current working directory of the task to all * the cache files/archives * * @param conf the jobconf */ public static void createSymlink(Configuration conf) { conf.set("mapred.create.symlink", "yes"); } /** * This method checks to see if symlinks are to be create for the localized cache files in the * current working directory * * @param conf the jobconf * @return true if symlinks are to be created- else return false */ public static boolean getSymlink(Configuration conf) { String result = conf.get("mapred.create.symlink"); if ("yes".equals(result)) { return true; } return false; } /** * This method checks if there is a conflict in the fragment names of the uris. Also makes sure * that each uri has a fragment. It is only to be called if you want to create symlinks for the * various archives and files. 
* * @param uriFiles The uri array of urifiles * @param uriArchives the uri array of uri archives */ public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) { if ((uriFiles == null) && (uriArchives == null)) { return true; } if (uriFiles != null) { for (int i = 0; i < uriFiles.length; i++) { String frag1 = uriFiles[i].getFragment(); if (frag1 == null) return false; for (int j = i + 1; j < uriFiles.length; j++) { String frag2 = uriFiles[j].getFragment(); if (frag2 == null) return false; if (frag1.equalsIgnoreCase(frag2)) return false; } if (uriArchives != null) { for (int j = 0; j < uriArchives.length; j++) { String frag2 = uriArchives[j].getFragment(); if (frag2 == null) { return false; } if (frag1.equalsIgnoreCase(frag2)) return false; for (int k = j + 1; k < uriArchives.length; k++) { String frag3 = uriArchives[k].getFragment(); if (frag3 == null) return false; if (frag2.equalsIgnoreCase(frag3)) return false; } } } } } return true; } private static class CacheStatus { // the local load path of this cache Path localizedLoadPath; // the base dir where the cache lies Path localizedBaseDir; // the unique directory in localizedBaseDir, where the cache lies Path uniqueParentDir; // the size of this cache long size; // number of instances using this cache int refcount; // the cache-file modification time long mtime; // is it initialized boolean inited = false; public CacheStatus(Path baseDir, Path localLoadPath, Path uniqueParentDir) { super(); this.localizedLoadPath = localLoadPath; this.refcount = 0; this.mtime = -1; this.localizedBaseDir = baseDir; this.size = 0; this.uniqueParentDir = uniqueParentDir; } // get the base dir for the cache Path getBaseDir() { return localizedBaseDir; } // Is it initialized? boolean isInited() { return inited; } // mark it as initalized void initComplete() { inited = true; } } /** * Clear the entire contents of the cache and delete the backing files. This should only be used * when the server is reinitializing, because the users are going to lose their files. */ public static void purgeCache(Configuration conf, MRAsyncDiskService service) throws IOException { synchronized (cachedArchives) { LocalFileSystem localFs = FileSystem.getLocal(conf); for (Map.Entry<String, CacheStatus> f : cachedArchives.entrySet()) { try { deleteLocalPath(service, localFs, f.getValue().localizedLoadPath); } catch (IOException ie) { LOG.debug("Error cleaning up cache", ie); } } cachedArchives.clear(); } } /** * Update the maps baseDirSize and baseDirNumberSubDir when deleting cache. * * @param cacheStatus cache status of the cache is deleted */ private static void deleteCacheInfoUpdate(CacheStatus cacheStatus) { if (!cacheStatus.isInited()) { // if it is not created yet, do nothing. return; } synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.getBaseDir()); if (dirSize != null) { dirSize -= cacheStatus.size; baseDirSize.put(cacheStatus.getBaseDir(), dirSize); } } synchronized (baseDirNumberSubDir) { Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir()); if (dirSubDir != null) { dirSubDir--; baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir); } } } /** * Update the maps baseDirSize and baseDirNumberSubDir when adding cache. 
* * @param cacheStatus cache status of the cache is added */ private static void addCacheInfoUpdate(CacheStatus cacheStatus) { long cacheSize = cacheStatus.size; synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.getBaseDir()); if (dirSize == null) { dirSize = Long.valueOf(cacheSize); } else { dirSize += cacheSize; } baseDirSize.put(cacheStatus.getBaseDir(), dirSize); } synchronized (baseDirNumberSubDir) { Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir()); if (dirSubDir == null) { dirSubDir = 1; } else { dirSubDir += 1; } baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir); } } }
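The static setters and getters above (addCacheFile, addCacheArchive, addFileToClassPath, createSymlink, checkURIs) form the job-side configuration API for the cache. Below is a minimal usage sketch; it assumes these statics live on the classic org.apache.hadoop.filecache.DistributedCache class (the package and class name are not shown here), and the HDFS URIs, host names and paths are placeholders, so treat it as an illustration rather than a drop-in snippet.

import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class CacheSetupSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();

    // Register a plain file and an archive; the #fragment names the symlink
    // created in the task working directory when symlinks are enabled.
    DistributedCache.addCacheFile(new URI("hdfs://namenode:8020/data/lookup.txt#lookup"), conf);
    DistributedCache.addCacheArchive(new URI("hdfs://namenode:8020/libs/terms.zip#terms"), conf);

    // Ship a jar: it is added to the cache and to the task classpath.
    DistributedCache.addFileToClassPath(new Path("/libs/helper.jar"), conf);

    // Ask the framework to create the symlinks named by the URI fragments.
    DistributedCache.createSymlink(conf);

    // checkURIs returns false if any fragment is missing or duplicated,
    // which would make the symlink names ambiguous.
    boolean ok = DistributedCache.checkURIs(
        DistributedCache.getCacheFiles(conf), DistributedCache.getCacheArchives(conf));
    System.out.println("cache URIs valid: " + ok);
  }
}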
/** @author Javier Paniza */ public abstract class ModelMapping implements java.io.Serializable { private static Log log = LogFactory.getLog(ModelMapping.class); private static boolean codeGenerationTime; private static boolean codeGenerationTimeObtained = false; private MetaComponent metaComponent; private String table; private Map propertyMappings = new HashMap(); private Map referenceMappings; private Collection modelProperties = new ArrayList(); // of String private Collection tableColumns = new ArrayList(); // of String private Collection referenceMappingsWithConverter; // of ReferenceMapping private boolean databaseMetadataLoaded = false; private boolean supportsSchemasInDataManipulation = true; private boolean supportsYearFunction = false; private boolean supportsMonthFunction = false; private boolean supportsTranslateFunction = false; private boolean referencePropertyWithFormula = false; public abstract String getModelName() throws XavaException; public abstract MetaModel getMetaModel() throws XavaException; /** Util specially to find out the type of properties that are not in model, only in mapping. */ public Class getType(String propertyName) throws XavaException { try { return getMetaModel().getMetaProperty(propertyName).getType(); } catch (ElementNotFoundException ex) { // Try to obtain it from primary key if (!(getMetaModel() instanceof MetaEntity)) return java.lang.Object.class; throw ex; } } public String getTable() { // Change this if by polymorphism ? if (isCodeGenerationTime()) return table; if (XavaPreferences.getInstance().isJPAPersistence() && getSchema() == null && !Is.emptyString(XPersistence.getDefaultSchema())) { return XPersistence.getDefaultSchema() + "." + table; } else if (XavaPreferences.getInstance().isHibernatePersistence() && getSchema() == null && !Is.emptyString(XHibernate.getDefaultSchema())) { return XHibernate.getDefaultSchema() + "." + table; } return table; } private static boolean isCodeGenerationTime() { if (!codeGenerationTimeObtained) { codeGenerationTimeObtained = true; try { // Class.forName("CodeGenerator"); ClassLoaderUtil.forName(ModelMapping.class, "CodeGenerator"); codeGenerationTime = true; } catch (Exception ex) { codeGenerationTime = false; } } return codeGenerationTime; } public void setTable(String tabla) { this.table = tabla; } public String getSchema() { int idx = table.indexOf('.'); if (idx < 0) return null; return table.substring(0, idx); } public String getUnqualifiedTable() { int idx = table.indexOf('.'); if (idx < 0) return table; return table.substring(idx + 1); } public String getTableToQualifyColumn() { return supportsSchemasInDataManipulation() ? getTable() : getUnqualifiedTable(); } public void addPropertyMapping(PropertyMapping propertyMapping) throws XavaException { propertyMappings.put(propertyMapping.getProperty(), propertyMapping); modelProperties.add(propertyMapping.getProperty()); // To keep order tableColumns.add(propertyMapping.getColumn()); if (propertyMapping.hasFormula() && !getMetaModel().isAnnotatedEJB3()) { propertyMapping.getMetaProperty().setReadOnly(true); } } public void addReferenceMapping(ReferenceMapping referenceMapping) throws XavaException { if (referenceMappings == null) referenceMappings = new HashMap(); referenceMappings.put(referenceMapping.getReference(), referenceMapping); referenceMapping.setContainer(this); } /** @return Not null */ public ReferenceMapping getReferenceMapping(String name) throws XavaException, ElementNotFoundException { ReferenceMapping r = referenceMappings == null ? 
null : (ReferenceMapping) referenceMappings.get(name); if (r == null) { throw new ElementNotFoundException("reference_mapping_not_found", name, getModelName()); } return r; } /** @return Not null */ public PropertyMapping getPropertyMapping(String name) throws XavaException, ElementNotFoundException { int i = name.indexOf('.'); if (i >= 0) { String rName = name.substring(0, i); String pName = name.substring(i + 1); if (isReferenceNameInReferenceMappings(rName)) { return getReferenceMapping(rName).getReferencedMapping().getPropertyMapping(pName); } else { // by embedded references: address.city -> address_city return getPropertyMapping(name.replace(".", "_")); } } PropertyMapping p = propertyMappings == null ? null : (PropertyMapping) propertyMappings.get(name); if (p == null) { throw new ElementNotFoundException("property_mapping_not_found", name, getModelName()); } return p; } private boolean isReferenceNameInReferenceMappings(String referenceName) { Collection<ReferenceMapping> col = getReferenceMappings(); for (ReferenceMapping rm : col) if (rm.getReference().equals(referenceName)) return true; return false; } /** * In the order in which they were added. * * @return Collection of <tt>String</tt>. */ public Collection getModelProperties() { return modelProperties; } /** * In the order in which they were added. * * @return Collection of <tt>String</tt>. */ public Collection getColumns() { return tableColumns; } public String getKeyColumnsAsString() throws XavaException { StringBuffer r = new StringBuffer(); Collection columns = new HashSet(); for (Iterator it = getMetaModel().getAllKeyPropertiesNames().iterator(); it.hasNext(); ) { String pr = (String) it.next(); String column = getColumn(pr); if (columns.contains(column)) continue; columns.add(column); r.append(column); r.append(' '); } return r.toString().trim(); } private boolean supportsSchemasInDataManipulation() { loadDatabaseMetadata(); return supportsSchemasInDataManipulation; } /** Wraps the column name with the SQL function for extracting the year from a date. */ public String yearSQLFunction(String column) { if (supportsYearFunction()) return "year(" + column + ")"; return "extract (year from " + column + ")"; } /** Wraps the column name with the SQL function for extracting the month from a date. */ public String monthSQLFunction(String column) { if (supportsMonthFunction()) return "month(" + column + ")"; return "extract (month from " + column + ")"; } /** * To ignore accents: so that searching 'camión' or 'camion' finds the same results * * <p>Good performance when using 'translate', but very slow when it falls back to nested 'replace' calls
* * @since v4m6 */ public String translateSQLFunction(String column) { if (supportsTranslateFunction()) return "translate(" + column + ",'aeiouAEIOU','áéíóúÁÉÍÓÚ')"; return "replace(replace(replace(replace(replace(replace(replace(replace(replace(replace(" + column + ", 'Ú', 'U'), 'ú', 'u'), 'Ó', 'O'), 'ó', 'o'), 'Í', 'I'), " + "'í', 'i'), 'É', 'E'), 'é', 'e'), 'Á', 'A'), 'á', 'a')"; } private boolean supportsYearFunction() { loadDatabaseMetadata(); return supportsYearFunction; } private boolean supportsMonthFunction() { loadDatabaseMetadata(); return supportsMonthFunction; } /** @since v4m6 */ private boolean supportsTranslateFunction() { loadDatabaseMetadata(); return supportsTranslateFunction; } private void loadDatabaseMetadata() { if (!databaseMetadataLoaded) { String componentName = "UNKNOWN"; Connection con = null; try { componentName = getMetaComponent().getName(); con = DataSourceConnectionProvider.getByComponent(componentName).getConnection(); DatabaseMetaData metaData = con.getMetaData(); supportsSchemasInDataManipulation = metaData.supportsSchemasInDataManipulation(); Collection timeDateFunctions = Strings.toCollection(metaData.getTimeDateFunctions().toUpperCase()); // // another solution instead of the use of 'if' would be to use a xml with // the information of the functions from each BBDD if ("DB2 UDB for AS/400".equals(metaData.getDatabaseProductName()) || "Oracle".equals(metaData.getDatabaseProductName()) || "PostgresSQL".equals(metaData.getDatabaseProductName())) { supportsTranslateFunction = true; } if ("Oracle".equals(metaData.getDatabaseProductName()) || "PostgreSQL".equals(metaData.getDatabaseProductName())) { supportsYearFunction = supportsMonthFunction = false; } else { supportsYearFunction = timeDateFunctions.contains("YEAR"); supportsMonthFunction = timeDateFunctions.contains("MONTH"); } databaseMetadataLoaded = true; } catch (Exception ex) { log.warn(XavaResources.getString("load_database_metadata_warning")); } finally { try { if (con != null) { con.close(); } } catch (SQLException e) { log.warn(XavaResources.getString("close_connection_warning")); } } } } public String getQualifiedColumn(String modelProperty) throws XavaException { PropertyMapping propertyMapping = (PropertyMapping) propertyMappings.get(modelProperty); if (propertyMapping != null && propertyMapping.hasFormula()) return getColumn(modelProperty); String tableColumn = getTableColumn(modelProperty, true); if (Is.emptyString(tableColumn)) return "'" + modelProperty + "'"; if (referencePropertyWithFormula) { referencePropertyWithFormula = false; return tableColumn; } // for calculated fields or created by multiple converter if (modelProperty.indexOf('.') >= 0) { if (tableColumn.indexOf('.') < 0) return tableColumn; String reference = modelProperty.substring(0, modelProperty.lastIndexOf('.')); if (tableColumn.startsWith(getTableToQualifyColumn() + ".")) { String member = modelProperty.substring(modelProperty.lastIndexOf('.') + 1); if (getMetaModel().getMetaReference(reference).getMetaModelReferenced().isKey(member)) return tableColumn; } // The next code uses the alias of the table instead of its name. In order to // support multiple references to the same model if (reference.indexOf('.') >= 0) { if (getMetaModel().getMetaProperty(modelProperty).isKey()) { reference = reference.substring(0, reference.lastIndexOf('.')); } reference = reference.replaceAll("\\.", "_"); } return "T_" + reference + tableColumn.substring(tableColumn.lastIndexOf('.')); } else { return getTableToQualifyColumn() + "." 
+ tableColumn; } } /** Support the use of references with dots, this is: myreference.myproperty. */ public String getColumn(String modelProperty) throws ElementNotFoundException, XavaException { return getTableColumn(modelProperty, false); } private String getTableColumn(String modelProperty, boolean qualifyReferenceMappingColumn) throws XavaException { PropertyMapping propertyMapping = (PropertyMapping) propertyMappings.get(modelProperty); if (propertyMapping == null) { int idx = modelProperty.indexOf('.'); if (idx >= 0) { String referenceName = modelProperty.substring(0, idx); String propertyName = modelProperty.substring(idx + 1); if (getMetaModel().getMetaReference(referenceName).isAggregate() && !Strings.firstUpper(referenceName).equals(getMetaModel().getContainerModelName())) { propertyMapping = (PropertyMapping) propertyMappings.get(referenceName + "_" + propertyName); if (propertyMapping == null) { int idx2 = propertyName.indexOf('.'); if (idx2 >= 0) { String referenceName2 = propertyName.substring(0, idx2); String propertyName2 = propertyName.substring(idx2 + 1); return getTableColumn( referenceName + "_" + referenceName2 + "." + propertyName2, qualifyReferenceMappingColumn); } else { throw new ElementNotFoundException( "property_mapping_not_found", referenceName + "_" + propertyName, getModelName()); } } return propertyMapping.getColumn(); } ReferenceMapping referenceMapping = getReferenceMapping(referenceName); if (referenceMapping.hasColumnForReferencedModelProperty(propertyName)) { if (qualifyReferenceMappingColumn) { return getTableToQualifyColumn() + "." + referenceMapping.getColumnForReferencedModelProperty(propertyName); } else { return referenceMapping.getColumnForReferencedModelProperty(propertyName); } } else { ModelMapping referencedMapping = referenceMapping.getReferencedMapping(); String tableName = referencedMapping.getTableToQualifyColumn(); boolean secondLevel = propertyName.indexOf('.') >= 0; String columnName = referencedMapping.getTableColumn(propertyName, secondLevel); boolean hasFormula = referencedMapping.getPropertyMapping(propertyName).hasFormula(); if (qualifyReferenceMappingColumn && !secondLevel && !hasFormula) { return tableName + "." + columnName; } else if (hasFormula) { String formula = referencedMapping.getPropertyMapping(propertyName).getFormula(); referencePropertyWithFormula = true; return qualifyFormulaWithReferenceName( formula, referencedMapping.getModelName(), modelProperty); } else { return columnName; } } } throw new ElementNotFoundException( "property_mapping_not_found", modelProperty, getModelName()); } if (propertyMapping.hasFormula()) return propertyMapping.getFormula(); return propertyMapping.getColumn(); } /** * @exception ElementNotFoundException If property does not exist. * @exception XavaException Any problem * @return nulo If property exists but it does not have converter. */ public IConverter getConverter(String modelProperty) throws ElementNotFoundException, XavaException { return getPropertyMapping(modelProperty).getConverter(); } /** * @exception ElementNotFoundException If property does not exist. * @exception XavaException Any problem * @return nulo If property exists but it does not have converter. */ public IMultipleConverter getMultipleConverter(String modelProperty) throws ElementNotFoundException, XavaException { return getPropertyMapping(modelProperty).getMultipleConverter(); } /** If the property exists and has converter. 
*/ public boolean hasConverter(String propertyName) { try { return getPropertyMapping(propertyName).hasConverter(); } catch (XavaException ex) { return false; } } public MetaComponent getMetaComponent() { return metaComponent; } public void setMetaComponent(MetaComponent componente) throws XavaException { this.metaComponent = componente; setupDefaultConverters(); } /** * Change the properties inside ${ } by the database qualified(schema + table) columns. Also if * the property inside ${ } is a model name it changes by the table name * * <p>For example, it would change: * * <pre> * select ${number}, ${name} from ${Tercero} * </pre> * * by * * <pre> * select G4GENBD.GENTGER.TGRCOD, G4GENBD.GENTGER.TGRDEN from G4GENBD.GENTGER * </pre> */ public String changePropertiesByColumns(String source) throws XavaException { return changePropertiesByColumns(source, true); } /** * Change the properties inside ${ } by the database columns without table and schema as prefix. * Also if the property inside ${ } is a model name it changes by the table name. * * <p>For example, it would change: * * <pre> * select ${number}, ${name} from ${Tercero} * </pre> * * by * * <pre> * select TGRCOD, TGRDEN * from G4GENBD.GENTGER * </pre> */ public String changePropertiesByNotQualifiedColumns(String source) throws XavaException { return changePropertiesByColumns(source, false); } private String changePropertiesByColumns(String source, boolean qualified) throws XavaException { StringBuffer r = new StringBuffer(source); int i = r.toString().indexOf("${"); int f = 0; while (i >= 0) { f = r.toString().indexOf("}", i + 2); if (f < 0) break; String property = r.substring(i + 2, f); String column = "0"; // thus it remained if it is calculated if (!getMetaModel().isCalculated(property)) { column = Strings.isModelName(property) ? getTable(property) : qualified ? getQualifiedColumn(property) : getColumn(property); } r.replace(i, f + 1, column); i = r.toString().indexOf("${"); } return r.toString(); } /** @since 4.1 */ private String getTable(String name) { return MetaComponent.get(name).getEntityMapping().getTable(); } public String changePropertiesByCMPAttributes(String source) throws XavaException { StringBuffer r = new StringBuffer(source); int i = r.toString().indexOf("${"); int f = 0; while (i >= 0) { f = r.toString().indexOf("}", i + 2); if (f < 0) break; String property = r.substring(i + 2, f); String cmpAttribute = null; if (property.indexOf('.') >= 0) { cmpAttribute = "o._" + Strings.firstUpper(Strings.change(property, ".", "_")); } else { MetaProperty metaProperty = getMetaModel().getMetaProperty(property); if (metaProperty.getMapping().hasConverter()) { cmpAttribute = "o._" + Strings.firstUpper(property); } else { cmpAttribute = "o." 
+ property; } } r.replace(i, f + 1, cmpAttribute); i = r.toString().indexOf("${"); } return r.toString(); } public boolean hasPropertyMapping(String memberName) { return propertyMappings.containsKey(memberName); } private void setupDefaultConverters() throws XavaException { Iterator it = propertyMappings.values().iterator(); while (it.hasNext()) { PropertyMapping propertyMapping = (PropertyMapping) it.next(); propertyMapping.setDefaultConverter(); } } public boolean hasReferenceMapping(MetaReference metaReference) { if (referenceMappings == null) return false; return referenceMappings.containsKey(metaReference.getName()); } public boolean isReferenceOverlappingWithSomeProperty( String reference, String propertiesOfReference) throws XavaException { String column = getReferenceMapping(reference).getColumnForReferencedModelProperty(propertiesOfReference); return containsColumn(getColumns(), column); } public boolean isReferenceOverlappingWithSomeProperty(String reference) throws XavaException { Iterator it = getReferenceMapping(reference).getDetails().iterator(); while (it.hasNext()) { ReferenceMappingDetail d = (ReferenceMappingDetail) it.next(); if (containsColumn(getColumns(), d.getColumn())) { String property = getMappingForColumn(d.getColumn()).getProperty(); if (!property.startsWith(reference + "_")) { return true; } } } return false; } public boolean isReferencePropertyOverlappingWithSomeProperty(String qualifiedProperty) throws XavaException { int idx = qualifiedProperty.indexOf('.'); if (idx < 0) return false; String ref = qualifiedProperty.substring(0, idx); String pr = qualifiedProperty.substring(idx + 1); return isReferenceOverlappingWithSomeProperty(ref, pr); } /** @throws XavaException If it does not have a overlapped property, or any other problem. */ public String getOverlappingPropertyForReference(String reference, String propertyOfReference) throws XavaException { String column = getReferenceMapping(reference).getColumnForReferencedModelProperty(propertyOfReference); if (propertyMappings == null) { throw new XavaException("reference_property_not_overlapped", propertyOfReference, reference); } Iterator it = propertyMappings.values().iterator(); while (it.hasNext()) { PropertyMapping mapping = (PropertyMapping) it.next(); if (column.equalsIgnoreCase(mapping.getColumn())) return mapping.getProperty(); } throw new XavaException("reference_property_not_overlapped", propertyOfReference, reference); } /** @return Of <tt>String</tt> and not null. 
*/ public Collection getOverlappingPropertiesOfReference(String reference) throws XavaException { Collection overlappingPropertiesOfReference = new ArrayList(); Iterator it = getReferenceMapping(reference).getDetails().iterator(); while (it.hasNext()) { ReferenceMappingDetail d = (ReferenceMappingDetail) it.next(); if (containsColumn(getColumns(), d.getColumn())) { String property = getMappingForColumn(d.getColumn()).getProperty(); if (!property.startsWith(reference + "_")) { overlappingPropertiesOfReference.add(d.getReferencedModelProperty()); } } } return overlappingPropertiesOfReference; } private boolean containsColumn(Collection columns, String column) { if (columns.contains(column)) return true; for (Iterator it = columns.iterator(); it.hasNext(); ) { if (((String) it.next()).equalsIgnoreCase(column)) return true; } return false; } private PropertyMapping getMappingForColumn(String column) throws XavaException { if (propertyMappings == null) { throw new ElementNotFoundException("mapping_not_found_no_property_mappings", column); } Iterator it = propertyMappings.values().iterator(); while (it.hasNext()) { PropertyMapping propertyMapping = (PropertyMapping) it.next(); if (propertyMapping.getColumn().equalsIgnoreCase(column)) { return propertyMapping; } } throw new ElementNotFoundException("mapping_for_column_not_found", column); } String getCMPAttributeForColumn(String column) throws XavaException { PropertyMapping mapping = getMappingForColumn(column); if (!mapping.hasConverter()) return Strings.change(mapping.getProperty(), ".", "_"); return "_" + Strings.change(Strings.firstUpper(mapping.getProperty()), ".", "_"); } private Collection getPropertyMappings() { return propertyMappings.values(); } public Collection getPropertyMappingsNotInModel() throws XavaException { Collection names = new ArrayList(getModelProperties()); names.removeAll(getMetaModel().getPropertiesNames()); if (names.isEmpty()) return Collections.EMPTY_LIST; Collection result = new ArrayList(); for (Iterator it = names.iterator(); it.hasNext(); ) { String name = (String) it.next(); if (name.indexOf('_') < 0) { result.add(getPropertyMapping(name)); } } return result; } private Collection getReferenceMappings() { return referenceMappings == null ? 
Collections.EMPTY_LIST : referenceMappings.values(); } public Collection getCmpFields() throws XavaException { Collection r = new ArrayList(); Collection mappedColumns = new HashSet(); for (Iterator it = getPropertyMappings().iterator(); it.hasNext(); ) { PropertyMapping pMapping = (PropertyMapping) it.next(); r.addAll(pMapping.getCmpFields()); mappedColumns.add(pMapping.getColumn()); } for (Iterator it = getReferenceMappings().iterator(); it.hasNext(); ) { ReferenceMapping rMapping = (ReferenceMapping) it.next(); for (Iterator itFields = rMapping.getCmpFields().iterator(); itFields.hasNext(); ) { CmpField field = (CmpField) itFields.next(); if (!mappedColumns.contains(field.getColumn())) { r.add(field); mappedColumns.add(field.getColumn()); } } } return r; } public boolean hasReferenceConverters() { return !getReferenceMappingsWithConverter().isEmpty(); } public Collection getReferenceMappingsWithConverter() { if (referenceMappingsWithConverter == null) { referenceMappingsWithConverter = new ArrayList(); Iterator it = getReferenceMappings().iterator(); while (it.hasNext()) { ReferenceMapping referenceMapping = (ReferenceMapping) it.next(); Collection mrd = referenceMapping.getDetails(); Iterator itd = mrd.iterator(); while (itd.hasNext()) { ReferenceMappingDetail referenceMappingDetail = (ReferenceMappingDetail) itd.next(); if (referenceMappingDetail.hasConverter()) { referenceMappingsWithConverter.add(referenceMapping); } } } } return referenceMappingsWithConverter; } /** * Find the columns name in the formula and replace its by qualify columns name: 'name' -> * 't_reference.name' */ private String qualifyFormulaWithReferenceName( String formula, String referenceName, String modelProperty) { EntityMapping em = MetaComponent.get(referenceName).getEntityMapping(); Iterator<String> it = em.getColumns().iterator(); while (it.hasNext()) { String column = it.next(); if (formula.contains(column)) { formula = formula.replace( column, getQualifyColumnName(modelProperty, referenceName + "." + column)); } } return formula; } private String getQualifyColumnName(String modelProperty, String tableColumn) { if (modelProperty.indexOf('.') >= 0) { if (tableColumn.indexOf('.') < 0) return tableColumn; String reference = modelProperty.substring(0, modelProperty.lastIndexOf('.')); if (tableColumn.startsWith(getTableToQualifyColumn() + ".")) { String member = modelProperty.substring(modelProperty.lastIndexOf('.') + 1); if (getMetaModel().getMetaReference(reference).getMetaModelReferenced().isKey(member)) return tableColumn; } // The next code uses the alias of the table instead of its name. In order to // support multiple references to the same model if (reference.indexOf('.') >= 0) { if (getMetaModel().getMetaProperty(modelProperty).isKey()) { reference = reference.substring(0, reference.lastIndexOf('.')); } reference = reference.substring(reference.lastIndexOf('.') + 1); } return "T_" + reference + tableColumn.substring(tableColumn.lastIndexOf('.')); } else { return getTableToQualifyColumn() + "." + tableColumn; } } }
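changePropertiesByColumns and changePropertiesByCMPAttributes above share the same scan-and-replace loop over ${...} tokens. The standalone sketch below reproduces that loop with a plain Map in place of the real metadata lookups; the class name, the map contents and the sample query are illustrative only and are not part of OpenXava.

import java.util.HashMap;
import java.util.Map;

// Illustrative only: reproduces the ${...} scan-and-replace loop used by
// changePropertiesByColumns, with a plain map instead of the real mapping.
public class TokenSubstitutionSketch {
  static String substitute(String source, Map<String, String> columnsByProperty) {
    StringBuffer r = new StringBuffer(source);
    int i = r.indexOf("${");
    while (i >= 0) {
      int f = r.indexOf("}", i + 2);
      if (f < 0) break;                                               // unterminated token: stop, as the original does
      String property = r.substring(i + 2, f);
      String column = columnsByProperty.getOrDefault(property, "0");  // "0" mimics the calculated-property case
      r.replace(i, f + 1, column);
      i = r.indexOf("${");                                            // rescan from the start, as the original does
    }
    return r.toString();
  }

  public static void main(String[] args) {
    Map<String, String> columns = new HashMap<>();
    columns.put("number", "G4GENBD.GENTGER.TGRCOD");
    columns.put("name", "G4GENBD.GENTGER.TGRDEN");
    columns.put("Tercero", "G4GENBD.GENTGER");
    System.out.println(substitute("select ${number}, ${name} from ${Tercero}", columns));
    // -> select G4GENBD.GENTGER.TGRCOD, G4GENBD.GENTGER.TGRDEN from G4GENBD.GENTGER
  }
}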
/** * A method class that collects exception-handling helpers. * * @author unitarou <[email protected]> */ public class Exceptions { /** Logger for this class */ private static final Log log_s_ = LogFactory.getLog(Exceptions.class); /** * Traces the exceptions wrapped by the argument and returns the first value for which {@link Throwable#getMessage()} returns something other than <code>null</code>.<br> * Internally this walks the chain of causes. * * @param throwable the exception whose message should be extracted * @return the message of the wrapped exception; <code>null</code> if the argument is <code>null</code>. */ public static String getRootMessage(Throwable throwable) { if (throwable == null) { return null; } Throwable root = getRootException(throwable); if (root.getMessage() != null) { return root.getMessage(); } return null; } /** * Finds and returns the root (deepest) exception of the given throwable.<br> * Returns <code>null</code> if the argument is <code>null</code>. * * @param throwable may be <code>null</code> * @return the root exception of throwable; may be <code>null</code> */ public static Throwable getRootException(Throwable throwable) { if (throwable == null) { return null; } Throwable cause = throwable; while (cause.getCause() != null) { cause = cause.getCause(); } return cause; } /** * Simply wraps {@link MessageFormat#format(String, Object...)}, but any exception raised by the call is logged at warning level and otherwise ignored.<br> * Use this when processing should continue even if the pattern has a format error, for example when building the message of an exception that is about to be thrown. * * @param pattern may be <code>null</code> * @param arguments may be <code>null</code> * @return never <code>null</code> */ public static String format(String pattern, Object... arguments) { try { return MessageFormat.format(pattern, arguments); } catch (Throwable t) { // log_s_.log(LogLevel.WARN, t, pattern, arguments); return Strings.nullToNullMark(pattern).toString() + "; " + Strings.concatWithComma(arguments).toString(); // $NON-NLS-1$ } } /** */ protected Exceptions() { super(); } }
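A small, hypothetical usage sketch for the helpers above: it builds a three-level cause chain and shows what getRootException, getRootMessage and format return. It assumes the Exceptions class above is on the classpath (together with the Strings helper its fallback path uses); the exception types and messages are arbitrary examples.

public class ExceptionsDemo {
  public static void main(String[] args) {
    // A chain where only the innermost exception carries a useful message.
    Throwable chained = new RuntimeException(
        new IllegalStateException(new java.io.IOException("disk full")));

    // getRootException follows getCause() until the deepest cause is reached.
    System.out.println(Exceptions.getRootException(chained).getClass().getSimpleName()); // IOException
    // getRootMessage returns that deepest cause's message (null if it has none).
    System.out.println(Exceptions.getRootMessage(chained)); // disk full

    // format delegates to MessageFormat but never throws on a bad pattern,
    // so it is safe to use while constructing other error messages.
    System.out.println(Exceptions.format("retry {0} of {1}", 2, 5)); // retry 2 of 5
  }
}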
public class SearchResultBean implements Serializable { /** */ private static final long serialVersionUID = -837802320118584736L; protected Search.Result searchResult; protected static Log logger = LogFactory.getLog(SearchResultBean.class.getName()); protected Locale locale; protected boolean display = false; protected boolean selected = false; public SearchResultBean() { display = false; } public SearchResultBean(Search.Result searchResult, Locale locale) { this.searchResult = searchResult; this.locale = locale; this.display = true; this.selected = true; } public boolean getDisplay() { return display; } public boolean getSelected() { return selected; } public void setSelected(boolean selected) { logger.debug("SearchResultBean.setChecked():" + getUniqueID() + "," + selected); this.selected = selected; } public List<DisplayField> getFieldValues() { ArrayList<DisplayField> list = new ArrayList<DisplayField>(); EmailFields emailFields = Config.getConfig().getEmailFields(); for (EmailField field : emailFields.getAvailableFields().values()) { if (field.getShowInResults() != EmailField.ShowInResults.NORESULTS) { try { EmailFieldValue efv = searchResult.getFieldValue(field.getName()); list.add(DisplayField.getDisplayField(efv, locale, false)); } catch (MessageSearchException mse) { logger.debug("failed to retrieve field value from message: " + mse.getMessage()); } } } return list; } public String getUniqueID() { try { return searchResult.getEmailId().getUniqueID(); } catch (MessageSearchException mse) { logger.debug("failed to retrieve unique message id: " + mse.getMessage(), mse); return null; } } public boolean getMessageExist() { try { EmailID emailID = searchResult.getEmailId(); Volume volume = emailID.getVolume(); return (volume != null); /*if (volume!=null) { Archiver archiver = Config.getConfig().getArchiver(); boolean exists = archiver.isMessageExist(emailID); if (!exists) { logger.debug("message is not accessible on disk"); } return exists; } else { logger.debug("could not lookup volume. the index appears out of sync with volumeinfo ID field."); }*/ } catch (Exception e) { logger.debug("failed to determine if message exists in store:" + e.getMessage(), e); } return false; } public String getVolumeID() { try { EmailID emailID = searchResult.getEmailId(); Volume volume = emailID.getVolume(); if (volume != null) { String volumeID = volume.getID(); return volumeID; } else return null; } catch (MessageSearchException mse) { logger.debug("failed to retrieve volumeid: " + mse.getMessage(), mse); return null; } // return searchResult.getEmailId().getVolume().getID(); } public static synchronized List<SearchResultBean> getSearchResultBeans( List<Search.Result> results, Locale locale) { List<SearchResultBean> searchResultBeans = new LinkedList<SearchResultBean>(); try { for (Search.Result result : results) { searchResultBeans.add(new SearchResultBean(result, locale)); } while (searchResultBeans.size() < Config.getConfig().getSearch().getMaxSearchResults()) { searchResultBeans.add(new SearchResultBean()); } } catch (java.util.ConcurrentModificationException ce) { // bit of a hack to say the least try { Thread.sleep(50); } catch (Exception e) { } return getSearchResultBeans(results, locale); } return searchResultBeans; } }
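getSearchResultBeans above always hands the view a list of exactly maxSearchResults entries, topping the real hits up with empty, non-display beans so the results table keeps a fixed shape. The sketch below isolates that padding idea with plain strings standing in for Search.Result and SearchResultBean; the row count is an arbitrary example.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative only: the "pad to a fixed number of rows" idea behind getSearchResultBeans.
public class PaddedResultsSketch {
  static List<String> pad(List<String> hits, int maxRows) {
    List<String> rows = new ArrayList<>(hits);
    while (rows.size() < maxRows) {
      rows.add("(empty row)"); // stands in for a bean constructed with display = false
    }
    return rows;
  }

  public static void main(String[] args) {
    System.out.println(pad(Arrays.asList("hit-1", "hit-2"), 5));
    // -> [hit-1, hit-2, (empty row), (empty row), (empty row)]
  }
}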
/** * It's too difficult to make this a unit test, since the TastyClient is purely an integration * element. So this is basically an integration test. It depends on the Tasty server being * available, and uses the service "javaTastyTest-(randomnumber)". Since other people may be testing * simultaneously, the random number ensures that people do not interfere with each other. */ public class TastyClientTest extends AbstractTestCase { protected final Log logger = LogFactory.getLog(getClass()); private TastyClient tc; // private String tastyServerUrl="http://kang.ccnmtl.columbia.edu:4090/eddie/jtasty/rest"; private String tastyServerUrl = ""; private String serviceName; private String unique; /** * Constructor * * @param testName name of the test case */ public TastyClientTest(String testName) { super(testName); } /** @return the suite of tests being tested */ public static Test suite() { return new TestSuite(TastyClientTest.class); } public void setUp() throws Exception { String time = String.valueOf((new Date()).getTime()).substring(8); String random = String.valueOf(Math.random()).substring(2); unique = (time + random); serviceName = "javaTastyTest-" + unique; JTastyDAO dao = new JTastyMockDAO(); TastyBean tb = new TastyBean(); tb.setDao(dao); tc = new TastyClient(tastyServerUrl, serviceName); tc.setTastyBean(tb); logger.debug(tastyServerUrl + "/service/" + serviceName); // create the service: tc.post(tastyServerUrl + "/service/" + serviceName); // add the user: tc.beanstatus(); } public void testTasty() throws Exception { try { System.out.println(tc.getRawOutput()); } catch (UnknownHostException e) { fail( "Either you didn't add tasty to your /etc/hosts file or tasty is down! See docs for info"); } // set up some sets of tags to be reused: HashSet ac = new HashSet(Arrays.asList(new String[] {"a", "c"})); HashSet cd = new HashSet(Arrays.asList(new String[] {"c", "d"})); HashSet abc = new HashSet(Arrays.asList(new String[] {"a", "b", "c"})); HashSet bcd = new HashSet(Arrays.asList(new String[] {"b", "c", "d"})); HashSet abcd = new HashSet(Arrays.asList(new String[] {"a", "b", "c", "d"})); // should come back empty: Set allUserTags = tc.getAllTagsForUser("testUser"); assertEquals(0, allUserTags.size()); // add some tags (and items): tc.addTagsToItem("testUser", "testItem", abc); tc.addTagsToItem("testUser", "testItem2", cd); // should come back with 4 tags: Set tags = tc.getAllTagsForUser("testUser"); assertEquals(abcd, tags); // should return 3 tags: tags = tc.getTagsForItem("testUser", "testItem"); assertEquals(abc, tags); // should return 2 tags: tags = tc.getTagsForItem("testUser", "testItem2"); assertEquals(cd, tags); // get all items and their tags: Map taggedItems = tc.getAllItemsAndTagsForUser("testUser"); tags = (Set) taggedItems.get("testItem"); assertEquals(abc, tags); tags = (Set) taggedItems.get("testItem2"); assertEquals(cd, tags); // delete all tags: tc.deleteTagsFromItem("testUser", "testItem", abc); tc.deleteTagsFromItem("testUser", "testItem2", cd); // verify that the tag pool is empty: tags = tc.getAllTagsForUser("testUser"); assertEquals(0, tags.size()); // verify that the items are tagless: tags = tc.getTagsForItem("testUser", "testItem"); assertEquals(0, tags.size()); tags = tc.getTagsForItem("testUser", "testItem2"); assertEquals(0, tags.size()); // try syncing (add-only): tc.setTagsForItem("testUser", "testItem", abc); // verify: tags = tc.getTagsForItem("testUser", "testItem"); assertEquals(abc, tags); // another sync (delete-only): tc.setTagsForItem("testUser",
"testItem", ac); // verify: tags = tc.getTagsForItem("testUser", "testItem"); assertEquals(ac, tags); // one more sync (add and delete): tc.setTagsForItem("testUser", "testItem", bcd); // verify: tags = tc.getTagsForItem("testUser", "testItem"); assertEquals(bcd, tags); } public void tearDown() throws Exception { // delete the service: tc.delete(tastyServerUrl + "/service/" + serviceName); } }
/** * MDC server * * @author yjiang */ public abstract class MDCServer extends IoHandlerAdapter { static final Log log = LogFactory.getLog(MDCServer.class); static final AtomicInteger counter = new AtomicInteger(0); /** the the max size of a packet, 32KB */ static int MAX_SIZE = 10240 * 1024; // test protected InetSocketAddress address; protected Selector selector; protected ServerSocketChannel server; protected final int PROCESS_NUMBER = 4; protected static Configuration _conf; protected IoAcceptor acceptor; protected boolean isRunning = false; protected boolean testKey() { String data = UID.random(24); byte[] bb = RSA.encode(data.getBytes(), TConn.pub_key); if (bb != null) { bb = RSA.decode(bb, TConn.pri_key); if (bb != null && data.equals(new String(bb))) { return true; } } return false; } /** Close. */ public void close() { if (selector != null) { selector.wakeup(); try { selector.close(); } catch (IOException e1) { log.warn("close selector fails", e1); } finally { selector = null; } } if (server != null) { try { server.socket().close(); server.close(); } catch (IOException e) { log.warn("close socket server fails", e); } finally { server = null; } } } /** * Instantiates a new MDC server. * * @param host the host * @param port the port */ protected MDCServer(String host, int port) { _conf = Config.getConfig(); address = (host == null) ? new InetSocketAddress(port) : new InetSocketAddress(host, port); /** initialize app command */ Command.init(); /** initialize the connection center */ TConnCenter.init(_conf, port); synchronized (_conf) { /** load public key from database */ TConn.pub_key = SystemConfig.s("pub_key", null); TConn.pri_key = SystemConfig.s("pri_key", null); /** initialize the RSA key, hardcode 2048 bits */ if (TConn.pub_key == null || TConn.pri_key == null || "".equals(TConn.pub_key) || "".equals(TConn.pri_key)) { /** print out the old state */ log.warn( "the pub_key or pri_key missed, the old state are pub_key:[" + TConn.pub_key + "], pri_key:[" + TConn.pri_key + "]"); Key k = RSA.generate(2048); TConn.pri_key = k.pri_key; TConn.pub_key = k.pub_key; /** print out the new public key */ log.warn("create new RSA key pair, pub_key:[" + TConn.pub_key + ']'); /** set back in database */ SystemConfig.setConfig("pri_key", TConn.pri_key); SystemConfig.setConfig("pub_key", TConn.pub_key); } MAX_SIZE = SystemConfig.i("mdc.max_size", MAX_SIZE); } } /** * Start. * * @return the MDC server */ public abstract MDCServer start(); /** Stop. */ public void stop() { acceptor.unbind(); } /** * Service. * * @param o the o * @param session the session */ void service(IoBuffer o, IoSession session) { try { // System.out.println(o.remaining() + "/" + o.capacity()); session.setAttribute("last", System.currentTimeMillis()); SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf"); if (in == null) { in = SimpleIoBuffer.create(4096); session.setAttribute("buf", in); } byte[] data = new byte[o.remaining()]; o.get(data); in.append(data); // log.debug("recv: " + data.length + ", " + // session.getRemoteAddress()); while (in.length() > 5) { in.mark(); /** * Byte 1: head of the package<br> * bit 7-6: "01", indicator of MDC<br> * bit 5: encrypt indicator, "0": no; "1": encrypted<br> * bit 4: zip indicator, "0": no, "1": ziped<br> * bit 0-3: reserved<br> * Byte 2-5: length of data<br> * Byte[…]: data array<br> */ byte head = in.read(); /** test the head indicator, if not correct close it */ if ((head & 0xC0) != 0x40) { log.info("flag is not correct! 
flag:" + head + ",from: " + session.getRemoteAddress()); session.write("error.head"); session.close(true); return; } int len = in.getInt(); if (len <= 0 || len > MAX_SIZE) { log.error( "mdcserver.Wrong lendth: " + len + "/" + MAX_SIZE + " - " + session.getRemoteAddress()); session.write("error.packet.size"); session.close(true); break; } // log.info("packet.len:" + len + ", len in buffer:" + // in.length()); if (in.length() < len) { in.reset(); break; } else { // do it byte[] b = new byte[len]; in.read(b); // log.info("stub.package.size: " + len + ", head:" + head + // ", cmd:" + Bean.toString(b)); // log.info("stub.package.size: " + len + ", head:" + head); /** test the zip flag */ if ((head & 0x10) != 0) { b = Zip.unzip(b); } final TConn d = (TConn) session.getAttribute("conn"); if (d != null) { /** test the encrypted flag */ if ((head & 0x20) != 0) { b = DES.decode(b, d.deskey); } final byte[] bb = b; /** test if the packet is for mdc or app */ new WorkerTask() { @Override public void onExecute() { d.process(bb); } }.schedule(0); session.setAttribute("last", System.currentTimeMillis()); } else { session.write("error.getconnection"); log.error("error to get connection: " + session.getRemoteAddress()); session.close(true); } } } } catch (Throwable e) { log.error("closing stub: " + session.getRemoteAddress(), e); session.write("exception." + e.getMessage()); session.close(true); } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionCreated(org.apache * .mina.core.session.IoSession) */ public void sessionCreated(IoSession session) throws Exception { log.info("stub created:" + session.getRemoteAddress()); Counter.add("mdc", "connection", 1); TConn d = new TConn(session); d.set("x-forwarded-for", session.getRemoteAddress().toString()); session.setAttribute("conn", d); } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionClosed(org.apache * .mina.core.session.IoSession) */ public void sessionClosed(IoSession session) throws Exception { log.info("closed stub: " + session.getRemoteAddress()); TConn d = (TConn) session.getAttribute("conn"); if (d != null) { d.close(); session.removeAttribute("conn"); } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache. * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus) */ public void sessionIdle(IoSession session, IdleStatus status) throws Exception { if (IdleStatus.BOTH_IDLE.equals(status)) { Long l = (Long) session.getAttribute("last"); if (l != null && System.currentTimeMillis() - l > 60 * 1000) { session.close(true); } } } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#messageReceived(org.apache * .mina.core.session.IoSession, java.lang.Object) */ public void messageReceived(IoSession session, Object message) throws Exception { // System.out.println(message); if (message instanceof IoBuffer) { service((IoBuffer) message, session); } } /** * Creates the tcp server. * * @param host the host * @param port the port * @return the MDC server */ public static synchronized MDCServer createTcpServer(String host, int port) { return new TDCServer(host, port); } /** * Creates the udp server. 
* * @param host the host * @param port the port * @return the MDC server */ public static synchronized MDCServer createUdpServer(String host, int port) { return new UDCServer(host, port); } /* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#exceptionCaught(org.apache * .mina.core.session.IoSession, java.lang.Throwable) */ @Override public void exceptionCaught(IoSession session, Throwable cause) throws Exception { TConn d = (TConn) session.getAttribute("conn"); if (d != null && d.valid()) { App.bye(d); } } }
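The inline comment in MDCServer.service() documents the wire format: bits 7-6 of the first byte must be "01", bit 5 marks an encrypted payload, bit 4 marks a zipped payload, and the next four bytes carry the payload length. A small encoder makes that layout concrete. This is a sketch under assumptions: the 4-byte length is taken to be big-endian (to match the in.getInt() read on the server side), and the payload passed in is expected to have already been zipped and/or DES-encrypted when the corresponding flags are set.

import java.nio.ByteBuffer;

// Sketch of building a packet per the header layout documented in MDCServer.service().
// Assumption: length is big-endian; zip/DES processing of the payload happens elsewhere.
public final class MdcPacketEncoder {
  public static byte[] encode(byte[] payload, boolean encrypted, boolean zipped) {
    int head = 0x40;                       // bits 7-6 = 01: MDC indicator
    if (encrypted) head |= 0x20;           // bit 5: payload is encrypted
    if (zipped)    head |= 0x10;           // bit 4: payload is zipped
    ByteBuffer buf = ByteBuffer.allocate(1 + 4 + payload.length);
    buf.put((byte) head);                  // byte 1: head
    buf.putInt(payload.length);            // bytes 2-5: length of data
    buf.put(payload);                      // remaining bytes: data array
    return buf.array();
  }
}

A packet built this way passes the server's (head & 0xC0) != 0x40 check and the length bounds check against MAX_SIZE.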
/** * DAO for the fc_mapping lookup table. * * @author nmilyaev */ public class FCMappingDAO extends AQueryDAO { @SuppressWarnings("unused") private final String updateSQL = "update third_party_site_lookup set "; private static final Log LOG = LogFactory.getLog(FCMappingDAO.class); /** Retrieves all rows from the fc_mapping table, mapping each row to a Record. */ @SuppressWarnings({"unchecked", "unused"}) public List<Record> getAll() { String query = "select * from fc_mapping"; LOG.debug("Query : " + query); long startTime = System.currentTimeMillis(); List<Record> results = this.jdbcTemplate.query( query, new Object[] {}, (RowMapper) new FCMappingResultSetExtractor()); return results; } protected void setUp() { Resource res = new FileSystemResource("build/classes/beans.xml"); XmlBeanFactory factory = new XmlBeanFactory(res); DataSource vfbDS = (DataSource) factory.getBean("vfbDataSource"); this.setDataSource(vfbDS); LOG.debug("data source : " + vfbDS); } private void process() { List<Record> list = this.getAll(); for (Record curr : list) { LOG.debug("curr:" + curr); } } public static void main(String[] args) { FCMappingDAO dao = new FCMappingDAO(); dao.setUp(); dao.process(); } private class Record { private String vfbid; private String id; private String longName; private String shortName; public Record(String vfbid, String id, String longName, String shortName) { super(); this.vfbid = vfbid; this.id = id; this.longName = longName; this.shortName = shortName; } public String toString() { return this.vfbid + " : " + this.longName + " > " + this.shortName; } } private class FCMappingResultSetExtractor implements ResultSetExtractor, RowMapper { public Object extractData(ResultSet rs) throws SQLException { Record res = new Record(rs.getString(1), rs.getString(2), rs.getString(3), rs.getString(4)); return res; } public Object mapRow(ResultSet rs, int line) { try { return extractData(rs); } catch (Exception ex) { ex.printStackTrace(); return null; } } } }
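setUp() pulls the vfbDataSource bean out of build/classes/beans.xml via XmlBeanFactory; for readers without that file, a programmatic equivalent is sketched below. The driver class, JDBC URL and credentials are placeholder assumptions, and it assumes the setDataSource(DataSource) hook that setUp() already calls on AQueryDAO is accessible to callers.

import javax.sql.DataSource;
import org.springframework.jdbc.datasource.DriverManagerDataSource;

// Sketch: wiring FCMappingDAO without beans.xml. All connection details are placeholders;
// substitute the real VFB database settings.
public class FCMappingDAOWiring {
  public static void main(String[] args) {
    DriverManagerDataSource ds = new DriverManagerDataSource();
    ds.setDriverClassName("com.mysql.jdbc.Driver");   // placeholder driver
    ds.setUrl("jdbc:mysql://localhost:3306/vfb");     // placeholder URL
    ds.setUsername("vfb");                            // placeholder credentials
    ds.setPassword("secret");

    FCMappingDAO dao = new FCMappingDAO();
    dao.setDataSource(ds);                            // same hook setUp() uses
    System.out.println(dao.getAll().size() + " fc_mapping rows");
  }
}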
/** * A simple RPC mechanism. * * <p>A <i>protocol</i> is a Java interface. All parameters and return types must be one of: * * <ul> * <li>a primitive type, <code>boolean</code>, <code>byte</code>, <code>char</code>, <code>short * </code>, <code>int</code>, <code>long</code>, <code>float</code>, <code>double</code>, or * <code>void</code>; or * <li>a {@link String}; or * <li>a {@link Writable}; or * <li>an array of the above types * </ul> * * All methods in the protocol should throw only IOException. No field data of the protocol instance * is transmitted. */ @InterfaceAudience.LimitedPrivate(value = {"Common", "HDFS", "MapReduce", "Yarn"}) @InterfaceStability.Evolving public class RPC { static final int RPC_SERVICE_CLASS_DEFAULT = 0; public enum RpcKind { RPC_BUILTIN((short) 1), // Used for built in calls by tests RPC_WRITABLE((short) 2), // Use WritableRpcEngine RPC_PROTOCOL_BUFFER((short) 3); // Use ProtobufRpcEngine static final short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size public final short value; // TODO make it private RpcKind(short val) { this.value = val; } } interface RpcInvoker { /** * Process a client call on the server side * * @param server the server within whose context this rpc call is made * @param protocol - the protocol name (the class of the client proxy used to make calls to the * rpc server. * @param rpcRequest - deserialized * @param receiveTime time at which the call received (for metrics) * @return the call's return * @throws IOException */ public Writable call(Server server, String protocol, Writable rpcRequest, long receiveTime) throws Exception; } static final Log LOG = LogFactory.getLog(RPC.class); /** * Get all superInterfaces that extend VersionedProtocol * * @param childInterfaces * @return the super interfaces that extend VersionedProtocol */ static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) { List<Class<?>> allInterfaces = new ArrayList<Class<?>>(); for (Class<?> childInterface : childInterfaces) { if (VersionedProtocol.class.isAssignableFrom(childInterface)) { allInterfaces.add(childInterface); allInterfaces.addAll(Arrays.asList(getSuperInterfaces(childInterface.getInterfaces()))); } else { LOG.warn( "Interface " + childInterface + " ignored because it does not extend VersionedProtocol"); } } return allInterfaces.toArray(new Class[allInterfaces.size()]); } /** * Get all interfaces that the given protocol implements or extends which are assignable from * VersionedProtocol. */ static Class<?>[] getProtocolInterfaces(Class<?> protocol) { Class<?>[] interfaces = protocol.getInterfaces(); return getSuperInterfaces(interfaces); } /** * Get the protocol name. If the protocol class has a ProtocolAnnotation, then get the protocol * name from the annotation; otherwise the class name is the protocol name. */ public static String getProtocolName(Class<?> protocol) { if (protocol == null) { return null; } ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class); return (anno == null) ? protocol.getName() : anno.protocolName(); } /** * Get the protocol version from protocol class. If the protocol class has a ProtocolAnnotation, * then get the protocol name from the annotation; otherwise the class name is the protocol name. 
*/ public static long getProtocolVersion(Class<?> protocol) { if (protocol == null) { throw new IllegalArgumentException("Null protocol"); } long version; ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class); if (anno != null) { version = anno.protocolVersion(); if (version != -1) return version; } try { Field versionField = protocol.getField("versionID"); versionField.setAccessible(true); return versionField.getLong(protocol); } catch (NoSuchFieldException ex) { throw new RuntimeException(ex); } catch (IllegalAccessException ex) { throw new RuntimeException(ex); } } private RPC() {} // no public ctor // cache of RpcEngines by protocol private static final Map<Class<?>, RpcEngine> PROTOCOL_ENGINES = new HashMap<Class<?>, RpcEngine>(); private static final String ENGINE_PROP = "rpc.engine"; /** * Set a protocol to use a non-default RpcEngine. * * @param conf configuration to use * @param protocol the protocol interface * @param engine the RpcEngine impl */ public static void setProtocolEngine(Configuration conf, Class<?> protocol, Class<?> engine) { conf.setClass(ENGINE_PROP + "." + protocol.getName(), engine, RpcEngine.class); } // return the RpcEngine configured to handle a protocol static synchronized RpcEngine getProtocolEngine(Class<?> protocol, Configuration conf) { RpcEngine engine = PROTOCOL_ENGINES.get(protocol); if (engine == null) { Class<?> impl = conf.getClass(ENGINE_PROP + "." + protocol.getName(), WritableRpcEngine.class); engine = (RpcEngine) ReflectionUtils.newInstance(impl, conf); PROTOCOL_ENGINES.put(protocol, engine); } return engine; } /** A version mismatch for the RPC protocol. */ public static class VersionMismatch extends RpcServerException { private static final long serialVersionUID = 0; private String interfaceName; private long clientVersion; private long serverVersion; /** * Create a version mismatch exception * * @param interfaceName the name of the protocol mismatch * @param clientVersion the client's version of the protocol * @param serverVersion the server's version of the protocol */ public VersionMismatch(String interfaceName, long clientVersion, long serverVersion) { super( "Protocol " + interfaceName + " version mismatch. (client = " + clientVersion + ", server = " + serverVersion + ")"); this.interfaceName = interfaceName; this.clientVersion = clientVersion; this.serverVersion = serverVersion; } /** * Get the interface name * * @return the java class name (eg. org.apache.hadoop.mapred.InterTrackerProtocol) */ public String getInterfaceName() { return interfaceName; } /** Get the client's preferred version */ public long getClientVersion() { return clientVersion; } /** Get the server's agreed to version. 
*/ public long getServerVersion() { return serverVersion; } /** get the rpc status corresponding to this exception */ public RpcStatusProto getRpcStatusProto() { return RpcStatusProto.ERROR; } /** get the detailed rpc status corresponding to this exception */ public RpcErrorCodeProto getRpcErrorCodeProto() { return RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH; } } /** * Get a proxy connection to a remote server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @return the proxy * @throws IOException if the far end through a RemoteException */ public static <T> T waitForProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @return the protocol proxy * @throws IOException if the far end through a RemoteException */ public static <T> ProtocolProxy<T> waitForProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { return waitForProtocolProxy(protocol, clientVersion, addr, conf, Long.MAX_VALUE); } /** * Get a proxy connection to a remote server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @param connTimeout time in milliseconds before giving up * @return the proxy * @throws IOException if the far end through a RemoteException */ public static <T> T waitForProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, long connTimeout) throws IOException { return waitForProtocolProxy(protocol, clientVersion, addr, conf, connTimeout).getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @param connTimeout time in milliseconds before giving up * @return the protocol proxy * @throws IOException if the far end through a RemoteException */ public static <T> ProtocolProxy<T> waitForProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, long connTimeout) throws IOException { return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout); } /** * Get a proxy connection to a remote server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @param rpcTimeout timeout for each RPC * @param timeout time in milliseconds before giving up * @return the proxy * @throws IOException if the far end through a RemoteException */ public static <T> T waitForProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, int rpcTimeout, long timeout) throws IOException { return waitForProtocolProxy(protocol, clientVersion, addr, conf, rpcTimeout, null, timeout) .getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol class * 
@param clientVersion client version * @param addr remote address * @param conf configuration to use * @param rpcTimeout timeout for each RPC * @param timeout time in milliseconds before giving up * @return the proxy * @throws IOException if the far end through a RemoteException */ public static <T> ProtocolProxy<T> waitForProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, int rpcTimeout, RetryPolicy connectionRetryPolicy, long timeout) throws IOException { long startTime = Time.now(); IOException ioe; while (true) { try { return getProtocolProxy( protocol, clientVersion, addr, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy); } catch (ConnectException se) { // namenode has not been started LOG.info("Server at " + addr + " not available yet, Zzzzz..."); ioe = se; } catch (SocketTimeoutException te) { // namenode is busy LOG.info("Problem connecting to server: " + addr); ioe = te; } catch (NoRouteToHostException nrthe) { // perhaps a VIP is failing over LOG.info("No route to host for server: " + addr); ioe = nrthe; } // check if timed out if (Time.now() - timeout >= startTime) { throw ioe; } // wait for retry try { Thread.sleep(1000); } catch (InterruptedException ie) { // IGNORE } } } /** * Construct a client-side proxy object that implements the named protocol, talking to a server at * the named address. * * @param <T> */ public static <T> T getProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, SocketFactory factory) throws IOException { return getProtocolProxy(protocol, clientVersion, addr, conf, factory).getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @param factory socket factory * @return the protocol proxy * @throws IOException if the far end through a RemoteException */ public static <T> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, SocketFactory factory) throws IOException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory); } /** * Construct a client-side proxy object that implements the named protocol, talking to a server at * the named address. 
* * @param <T> */ public static <T> T getProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory) throws IOException { return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory).getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param ticket user group information * @param conf configuration to use * @param factory socket factory * @return the protocol proxy * @throws IOException if the far end through a RemoteException */ public static <T> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory) throws IOException { return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, 0, null); } /** * Construct a client-side proxy that implements the named protocol, talking to a server at the * named address. * * @param <T> * @param protocol protocol * @param clientVersion client's version * @param addr server address * @param ticket security ticket * @param conf configuration * @param factory socket factory * @param rpcTimeout max time for each rpc; 0 means no timeout * @return the proxy * @throws IOException if any error occurs */ public static <T> T getProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout) throws IOException { return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, null) .getProxy(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol protocol * @param clientVersion client's version * @param addr server address * @param ticket security ticket * @param conf configuration * @param factory socket factory * @param rpcTimeout max time for each rpc; 0 means no timeout * @return the proxy * @throws IOException if any error occurs */ public static <T> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException { if (UserGroupInformation.isSecurityEnabled()) { SaslRpcServer.init(conf); } return getProtocolEngine(protocol, conf) .getProxy( protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy); } /** * Construct a client-side proxy object with the default SocketFactory * * @param <T> * @param protocol * @param clientVersion * @param addr * @param conf * @return a proxy instance * @throws IOException */ public static <T> T getProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy(); } /** Returns the server address for a given proxy. */ public static InetSocketAddress getServerAddress(Object proxy) { return getConnectionIdForProxy(proxy).getAddress(); } /** * Return the connection ID of the given object. If the provided object is in fact a protocol * translator, we'll get the connection ID of the underlying proxy object. * * @param proxy the proxy object to get the connection ID of. 
* @return the connection ID for the provided proxy object. */ public static ConnectionId getConnectionIdForProxy(Object proxy) { if (proxy instanceof ProtocolTranslator) { proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject(); } RpcInvocationHandler inv = (RpcInvocationHandler) Proxy.getInvocationHandler(proxy); return inv.getConnectionId(); } /** * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods * that are supported by the server * * @param protocol * @param clientVersion * @param addr * @param conf * @return a protocol proxy * @throws IOException */ public static <T> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { return getProtocolProxy( protocol, clientVersion, addr, conf, NetUtils.getDefaultSocketFactory(conf)); } /** * Stop the proxy. Proxy must either implement {@link Closeable} or must have associated {@link * RpcInvocationHandler}. * * @param proxy the RPC proxy object to be stopped * @throws HadoopIllegalArgumentException if the proxy does not implement {@link Closeable} * interface or does not have closeable {@link InvocationHandler} */ public static void stopProxy(Object proxy) { if (proxy == null) { throw new HadoopIllegalArgumentException("Cannot close proxy since it is null"); } try { if (proxy instanceof Closeable) { ((Closeable) proxy).close(); return; } else { InvocationHandler handler = Proxy.getInvocationHandler(proxy); if (handler instanceof Closeable) { ((Closeable) handler).close(); return; } } } catch (IOException e) { LOG.error("Closing proxy or invocation handler caused exception", e); } catch (IllegalArgumentException e) { LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e); } // If you see this error on a mock object in a unit test you're // developing, make sure to use MockitoUtil.mockProtocol() to // create your mock. throw new HadoopIllegalArgumentException( "Cannot close proxy - is not Closeable or " + "does not provide closeable invocation handler " + proxy.getClass()); } /** Class to construct instances of RPC server with specific options. */ public static class Builder { private Class<?> protocol = null; private Object instance = null; private String bindAddress = "0.0.0.0"; private int port = 0; private int numHandlers = 1; private int numReaders = -1; private int queueSizePerHandler = -1; private boolean verbose = false; private final Configuration conf; private SecretManager<? 
extends TokenIdentifier> secretManager = null; private String portRangeConfig = null; public Builder(Configuration conf) { this.conf = conf; } /** Mandatory field */ public Builder setProtocol(Class<?> protocol) { this.protocol = protocol; return this; } /** Mandatory field */ public Builder setInstance(Object instance) { this.instance = instance; return this; } /** Default: 0.0.0.0 */ public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; } /** Default: 0 */ public Builder setPort(int port) { this.port = port; return this; } /** Default: 1 */ public Builder setNumHandlers(int numHandlers) { this.numHandlers = numHandlers; return this; } /** Default: -1 */ public Builder setnumReaders(int numReaders) { this.numReaders = numReaders; return this; } /** Default: -1 */ public Builder setQueueSizePerHandler(int queueSizePerHandler) { this.queueSizePerHandler = queueSizePerHandler; return this; } /** Default: false */ public Builder setVerbose(boolean verbose) { this.verbose = verbose; return this; } /** Default: null */ public Builder setSecretManager(SecretManager<? extends TokenIdentifier> secretManager) { this.secretManager = secretManager; return this; } /** Default: null */ public Builder setPortRangeConfig(String portRangeConfig) { this.portRangeConfig = portRangeConfig; return this; } /** * Build the RPC Server. * * @throws IOException on error * @throws HadoopIllegalArgumentException when mandatory fields are not set */ public Server build() throws IOException, HadoopIllegalArgumentException { if (this.conf == null) { throw new HadoopIllegalArgumentException("conf is not set"); } if (this.protocol == null) { throw new HadoopIllegalArgumentException("protocol is not set"); } if (this.instance == null) { throw new HadoopIllegalArgumentException("instance is not set"); } return getProtocolEngine(this.protocol, this.conf) .getServer( this.protocol, this.instance, this.bindAddress, this.port, this.numHandlers, this.numReaders, this.queueSizePerHandler, this.verbose, this.conf, this.secretManager, this.portRangeConfig); } } /** An RPC Server. 
*/ public abstract static class Server extends org.apache.hadoop.ipc.Server { boolean verbose; static String classNameBase(String className) { String[] names = className.split("\\.", -1); if (names == null || names.length == 0) { return className; } return names[names.length - 1]; } /** Store a map of protocol and version to its implementation */ /** The key in Map */ static class ProtoNameVer { final String protocol; final long version; ProtoNameVer(String protocol, long ver) { this.protocol = protocol; this.version = ver; } @Override public boolean equals(Object o) { if (o == null) return false; if (this == o) return true; if (!(o instanceof ProtoNameVer)) return false; ProtoNameVer pv = (ProtoNameVer) o; return ((pv.protocol.equals(this.protocol)) && (pv.version == this.version)); } @Override public int hashCode() { return protocol.hashCode() * 37 + (int) version; } } /** The value in map */ static class ProtoClassProtoImpl { final Class<?> protocolClass; final Object protocolImpl; ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) { this.protocolClass = protocolClass; this.protocolImpl = protocolImpl; } } ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray = new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX); Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RPC.RpcKind rpcKind) { if (protocolImplMapArray.size() == 0) { // initialize for all rpc kinds for (int i = 0; i <= RpcKind.MAX_INDEX; ++i) { protocolImplMapArray.add(new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10)); } } return protocolImplMapArray.get(rpcKind.ordinal()); } // Register protocol and its impl for rpc calls void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) { String protocolName = RPC.getProtocolName(protocolClass); long version; try { version = RPC.getProtocolVersion(protocolClass); } catch (Exception ex) { LOG.warn("Protocol " + protocolClass + " NOT registered as cannot get protocol version "); return; } getProtocolImplMap(rpcKind) .put( new ProtoNameVer(protocolName, version), new ProtoClassProtoImpl(protocolClass, protocolImpl)); LOG.debug( "RpcKind = " + rpcKind + " Protocol Name = " + protocolName + " version=" + version + " ProtocolImpl=" + protocolImpl.getClass().getName() + " protocolClass=" + protocolClass.getName()); } static class VerProtocolImpl { final long version; final ProtoClassProtoImpl protocolTarget; VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) { this.version = ver; this.protocolTarget = protocolTarget; } } VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind, String protocolName) { VerProtocolImpl[] resultk = new VerProtocolImpl[getProtocolImplMap(rpcKind).size()]; int i = 0; for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : getProtocolImplMap(rpcKind).entrySet()) { if (pv.getKey().protocol.equals(protocolName)) { resultk[i++] = new VerProtocolImpl(pv.getKey().version, pv.getValue()); } } if (i == 0) { return null; } VerProtocolImpl[] result = new VerProtocolImpl[i]; System.arraycopy(resultk, 0, result, 0, i); return result; } VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind, String protocolName) { Long highestVersion = 0L; ProtoClassProtoImpl highest = null; if (LOG.isDebugEnabled()) { LOG.debug("Size of protoMap for " + rpcKind + " =" + getProtocolImplMap(rpcKind).size()); } for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : getProtocolImplMap(rpcKind).entrySet()) { if (pv.getKey().protocol.equals(protocolName)) { if ((highest == null) || 
(pv.getKey().version > highestVersion)) { highest = pv.getValue(); highestVersion = pv.getKey().version; } } } if (highest == null) { return null; } return new VerProtocolImpl(highestVersion, highest); } protected Server( String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount, int numReaders, int queueSizePerHandler, Configuration conf, String serverName, SecretManager<? extends TokenIdentifier> secretManager, String portRangeConfig) throws IOException { super( bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler, conf, serverName, secretManager, portRangeConfig); initProtocolMetaInfo(conf); } private void initProtocolMetaInfo(Configuration conf) { RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, ProtobufRpcEngine.class); ProtocolMetaInfoServerSideTranslatorPB xlator = new ProtocolMetaInfoServerSideTranslatorPB(this); BlockingService protocolInfoBlockingService = ProtocolInfoService.newReflectiveBlockingService(xlator); addProtocol( RpcKind.RPC_PROTOCOL_BUFFER, ProtocolMetaInfoPB.class, protocolInfoBlockingService); } /** * Add a protocol to the existing server. * * @param protocolClass - the protocol class * @param protocolImpl - the impl of the protocol that will be called * @return the server (for convenience) */ public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) { registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl); return this; } @Override public Writable call( RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime) throws Exception { return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime); } } }
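The class Javadoc spells out what a protocol interface may contain, and the Builder Javadoc lists the mandatory fields, so a compact end-to-end sketch may help: define the interface, publish an implementation through RPC.Builder, and call it through RPC.getProxy. This is a sketch under assumptions: PingProtocol, its versionID, the bind address and the port are invented for illustration; server.start() comes from the org.apache.hadoop.ipc.Server base class; and depending on the configured RpcEngine the interface may additionally need to extend VersionedProtocol.

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;

// Sketch of wiring a client and a server around the RPC class above.
// The protocol, address and port are illustrative assumptions, not part of the source.
public class RpcSketch {
  /** A protocol: a Java interface whose methods throw only IOException. */
  public interface PingProtocol {
    long versionID = 1L;                     // read reflectively by RPC.getProtocolVersion()
    String echo(String message) throws IOException;
  }

  /** Server-side implementation of the protocol. */
  public static class PingServer implements PingProtocol {
    public String echo(String message) { return message; }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // protocol, instance and conf are the mandatory Builder fields.
    RPC.Server server =
        new RPC.Builder(conf)
            .setProtocol(PingProtocol.class)
            .setInstance(new PingServer())
            .setBindAddress("0.0.0.0")
            .setPort(9000)                   // illustrative port
            .setNumHandlers(2)
            .build();
    server.start();                          // inherited from org.apache.hadoop.ipc.Server

    // Client side: obtain a proxy with the default SocketFactory and call through it.
    PingProtocol proxy =
        RPC.getProxy(PingProtocol.class, PingProtocol.versionID,
            new InetSocketAddress("localhost", 9000), conf);
    System.out.println(proxy.echo("hello"));
    RPC.stopProxy(proxy);
  }
}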
/** @author Javier Paniza */ public class ColorTest extends ModuleTestBase { private static Log log = LogFactory.getLog(ColorTest.class); public ColorTest(String testName) { super(testName, "Color"); } public void testSharedReport() throws Exception { // we need that there is not any report execute("ExtendedPrint.myReports"); assertDialogTitle("My reports"); assertEditable("name"); assertNoAction("MyReport.createNew"); assertNoAction("MyReport.remove"); assertNoAction("MyReport.share"); // create a new report setValue("name", "This is an report to share"); checkRowCollection("columns", 2); checkRowCollection("columns", 3); checkRowCollection("columns", 4); checkRowCollection("columns", 5); execute("MyReport.removeColumn", "viewObject=xava_view_columns"); assertCollectionRowCount("columns", 2); execute("MyReport.editColumn", "row=1,viewObject=xava_view_columns"); setValue("value", "rojo"); execute("MyReport.saveColumn"); assertDialogTitle("My reports"); execute("MyReport.generatePdf"); assertNoDialog(); assertNoErrors(); // shared execute("ExtendedPrint.myReports"); assertAction("MyReport.createNew"); assertAction("MyReport.remove"); assertAction("MyReport.share"); assertValidValues( "name", new String[][] {{"This is an report to share", "This is an report to share"}}); execute("MyReport.share", "xava.keyProperty=name"); assertNoErrors(); assertDialog(); assertValidValues( "name", new String[][] { {"This is an report to share__SHARED_REPORT__", "This is an report to share (Shared)"} }); // delete execute("MyReport.remove", "xava.keyProperty=name"); assertNoErrors(); assertMessage("Report 'This is an report to share' removed"); assertEditable("name"); assertNoAction("MyReport.createNew"); assertNoAction("MyReport.remove"); assertNoAction("MyReport.share"); } public void testSubcontrollerOnChangeControllers() throws Exception { assertAction("ColorSub.firstAction"); execute("List.addColumns"); assertNoAction("ColorSub.firstAction"); } public void testSubcontroller() throws Exception { String linkXml = getHtmlPage().getHtmlElementById("ox_OpenXavaTest_Color__sc-a-ColorSub_list").asXml(); assertTrue(linkXml.contains("<i class=\"mdi mdi-run\"")); assertFalse(linkXml.contains("images/")); assertNoAction("ColorSub.fourAction"); execute("ColorSub.firstAction"); assertDialog(); closeDialog(); execute("Mode.detailAndFirst"); assertAction("ColorSub.fourAction"); HtmlElement container = getHtmlPage().getHtmlElementById("ox_OpenXavaTest_Color__sc-container-ColorSub_detail"); HtmlElement menu = getHtmlPage().getHtmlElementById("ox_OpenXavaTest_Color__sc-ColorSub_detail"); assertTrue("display:none;".equals(menu.getAttribute("style"))); assertTrue(container.asText().contains("My processes")); assertTrue(container.asText().contains("First action from subcontroller")); assertTrue(container.asText().contains("Second action")); assertTrue(container.asText().contains("Third action")); } public void testPrintPDF() throws Exception { execute("List.orderBy", "property=number"); checkRow(1); String number1 = getValueInList(1, 0); String name1 = getValueInList(1, 1); String hexValue1 = getValueInList(1, 2); String useTo1 = getValueInList(1, 3); String characteristicThing1 = getValueInList(1, 4); checkRow(5); String number5 = getValueInList(5, 0); String name5 = getValueInList(5, 1); String hexValue5 = getValueInList(5, 2); String useTo5 = getValueInList(5, 3); String characteristicThing5 = getValueInList(5, 4); execute("List.orderBy", "property=number"); checkRow(0); String number0 = getValueInList(0, 0); String 
name0 = getValueInList(0, 1); String hexValue0 = getValueInList(0, 2); String useTo0 = getValueInList(0, 3); String characteristicThing0 = getValueInList(0, 4); execute("Color.seeMessageSelected"); assertMessage("(before) Rows of selected colors [0]"); assertMessage( "(after) Rows of selected colors [{number=" + number1 + "}][{number=" + number5 + "}][{number=" + number0 + "}]"); execute("Print.generatePdf"); assertContentTypeForPopup("application/pdf"); assertPopupPDFLinesCount(7); assertPopupPDFLine(3, getPDFLine(number0, name0, hexValue0, useTo0, characteristicThing0)); assertPopupPDFLine(4, getPDFLine(number5, name5, hexValue5, useTo5, characteristicThing5)); assertPopupPDFLine(5, getPDFLine(number1, name1, hexValue1, useTo1, characteristicThing1)); } private String getPDFLine( String number, String name, String hexValue, String useTo, String characteristicThing) { String s = ""; s += Is.empty(number) ? "" : number + " "; s += Is.empty(name) ? "" : name + " "; s += Is.empty(hexValue) ? "" : hexValue + " "; s += Is.empty(useTo) ? "" : useTo + " "; s += Is.empty(characteristicThing) ? "" : characteristicThing + " "; return s.trim(); } public void testActionWithSelectedRowFromAnotherPage() throws Exception { checkRow(2); String number2 = getValueInList(2, 0); checkRow(6); String number6 = getValueInList(6, 0); execute("List.goNextPage"); checkRow(10); String number10 = getValueInList(0, 0); execute("List.goNextPage"); execute("Color.seeMessageSelected"); assertMessage("(before) Rows of selected colors [2][6][10]"); assertMessage( "(after) Rows of selected colors [{number=" + number2 + "}][{number=" + number6 + "}][{number=" + number10 + "}]"); assertNoErrors(); } public void testSelectedAllAndDeselectedAll() throws Exception { execute("List.orderBy", "property=number"); assertLabelInList(1, "Name"); assertTrue(getValueInList(0, 1).equals("ROJO")); checkAll(); assertAllChecked(); execute("List.orderBy", "property=number"); assertFalse(getValueInList(0, 1).equals("ROJO")); assertAllUnchecked(); execute("List.orderBy", "property=number"); assertTrue(getValueInList(0, 1).equals("ROJO")); uncheckRow(0); uncheckRow(5); execute("List.orderBy", "property=number"); assertFalse(getValueInList(0, 1).equals("ROJO")); assertAllUnchecked(); checkAll(); assertRowChecked(0); execute("List.orderBy", "property=number"); assertRowUnchecked(0); assertRowUnchecked(5); checkAll(); assertRowChecked(0); uncheckAll(); assertRowUnchecked(0); execute("List.orderBy", "property=number"); assertFalse(getValueInList(0, 1).equals("ROJO")); assertAllChecked(); } /* This test requires at least 6 pages (more than 50 elements) to work. When you did: select, change page, select, order, select and change page. 
It lost the selection */ public void testSelectAndOrderWithALotOfElements() throws Exception { execute("List.orderBy", "property=number"); checkRow(0); checkRow(1); execute("List.goPage", "page=2"); checkRow(12); checkRow(13); execute("List.goPage", "page=1"); assertRowUnchecked(2); assertRowUnchecked(3); execute("List.orderBy", "property=number"); assertRowUnchecked(0); assertRowUnchecked(1); assertRowUnchecked(2); assertRowUnchecked(3); execute("List.goPage", "page=2"); assertRowUnchecked(10); assertRowUnchecked(11); assertRowUnchecked(12); assertRowUnchecked(13); execute("List.goPage", "page=1"); checkRow(4); execute("List.orderBy", "property=number"); assertRowChecked(0); assertRowChecked(1); assertRowUnchecked(2); assertRowUnchecked(3); assertRowUnchecked(4); execute("List.goPage", "page=2"); assertRowUnchecked(10); assertRowUnchecked(11); assertRowChecked(12); assertRowChecked(13); assertRowUnchecked(14); execute("List.orderBy", "property=number"); assertRowUnchecked(10); assertRowUnchecked(11); assertRowUnchecked(12); assertRowUnchecked(13); assertRowUnchecked(14); execute("List.goPage", "page=1"); assertRowUnchecked(0); assertRowUnchecked(1); assertRowUnchecked(2); assertRowUnchecked(3); assertRowChecked(4); } public void testNavigationByKeyZero() throws Exception { assertLabelInList(0, "Number"); assertValueInList(0, 0, "0"); assertValueInList(1, 0, "1"); execute("List.viewDetail", "row=1"); assertValue("number", "1"); assertValue("name", "NEGRO"); execute("Navigation.previous"); assertValue("number", "0"); assertValue("name", "ROJO"); execute("Navigation.previous"); assertError("We already are at the beginning of the list"); assertValue("number", "0"); assertValue("name", "ROJO"); } public void testKeysWithZeroValue() throws Exception { assertValueInList(0, "number", "0"); assertValueInList(0, "name", "ROJO"); execute("Mode.detailAndFirst"); assertNoErrors(); assertValue("number", "0"); assertValue("name", "ROJO"); assertValue("sample", "RED"); } public void testMessageScapedWithQuotes() throws Exception { assertListNotEmpty(); execute("List.viewDetail", "row=0"); execute("Color.seeMessage"); assertMessage("Message: A.B.C"); } public void testIdentityCalculator() throws Exception { execute("CRUD.new"); assertNoErrors(); setValue("number", "-1"); // needed in this case because 0 is an existing key setValue("name", "JUNIT COLOR " + (int) (Math.random() * 200)); execute("TypicalNotResetOnSave.save"); assertNoErrors(); String last = getValue("number"); execute("CRUD.new"); assertNoErrors(); setValue("number", "-1"); // needed in this case because 0 is an existing key setValue("name", "JUNIT COLOR " + (int) (Math.random() * 200)); execute("TypicalNotResetOnSave.save"); assertNoErrors(); String next = String.valueOf(Integer.parseInt(last) + 1); assertValue("number", next); } public void testOptimisticConcurrency() throws Exception { // Must be called 2 times in order to fix some problems on second time modifyColorFromFirstUser(1); modifyColorFromFirstUser(2); } public void testFilterByNumberZero() throws Exception { setConditionValues(new String[] {"0"}); execute("List.filter"); assertListRowCount(1); } public void modifyColorFromFirstUser(int id) throws Exception { // First user execute("List.viewDetail", "row=2"); assertNotExists("version"); setValue("name", "COLOR A" + id); // Second user, it's faster, he wins ColorTest otherSession = new ColorTest("Color2"); otherSession.modifyColorFromSecondUser(id); // The first user continues execute("TypicalNotResetOnSave.save"); 
assertError("Impossible to execute Save action: Another user has modified this record"); execute("Mode.list"); assertValueInList(2, "name", "COLOR B" + id); // The second user won } private void modifyColorFromSecondUser(int id) throws Exception { setUp(); execute("List.viewDetail", "row=2"); setValue("name", "COLOR B" + id); execute("TypicalNotResetOnSave.save"); assertNoErrors(); tearDown(); } public void testFilterDescriptionsList_forTabsAndNotForTabs() throws Exception { try { CharacteristicThing.findByNumber(2); } catch (NoResultException ex) { fail("It must to exist"); } // Color: 'usedTo' without descriptionsList and 'characteristicThing' without descriptionsList assertLabelInList(4, "Name of Used to"); assertLabelInList(5, "Characteristic thing"); assertValueInList(0, 4, "CAR"); assertValueInList(0, 5, "3 PLACES"); setConditionValues(new String[] {"", "", "", "CAR", "3 PLACES"}); execute("List.filter"); assertNoErrors(); assertListRowCount(1); // Color2: 'usedTo' with descriptionsList and 'characteristicThing' with descriptionsList and // condition changeModule("Color2"); assertLabelInList(4, "Name of Used to"); assertLabelInList(5, "Characteristic thing"); assertValueInList(0, 4, "CAR"); assertValueInList(0, 5, "3 PLACES"); setConditionValues(new String[] {"", "", "", "1", "0"}); execute("List.filter"); assertNoErrors(); assertListRowCount(1); try { setConditionValues( new String[] {"", "", "", "", "2"}); // descriptionsList has a condition: number < 2 } catch (IllegalArgumentException ex) { assertTrue(ex.getMessage().equals("No option found with value: 2")); } } public void testShowActionOnlyInEachRow() throws Exception { // confirmMessage with row String html = getHtml(); assertTrue(html.contains("Delete record on row 2: Are you sure?")); // action with mode=NONE: it display only in each row assertAction("CRUD.deleteRow"); setConditionValues(new String[] {"", "ZZZZZ"}); execute("List.filter"); assertListRowCount(0); assertNoAction("CRUD.deleteRow"); } public void testIgnoreAccentsForStringArgumentsInTheFilter() throws Exception { // create record with name 'marrón' execute("CRUD.new"); setValue("name", "marrón"); execute("TypicalNotResetOnSave.save"); assertNoErrors(); // filter by 'marron' execute("Mode.list"); setConditionValues("", "marron"); execute("List.filter"); assertListRowCount(1); assertValueInList(0, 1, "MARRÓN"); // filter by 'marrón' setConditionValues("", ""); execute("List.filter"); assertListRowCount(10); setConditionValues("", "marrón"); execute("List.filter"); assertListRowCount(1); assertValueInList(0, 1, "MARRÓN"); // delete checkAll(); execute("CRUD.deleteSelected"); assertNoErrors(); assertListRowCount(0); } public void testChangeModelNameInConditions() throws Exception { execute("CRUD.new"); assertNoErrors(); assertExists("anotherCT.number"); assertValidValuesCount("anotherCT.number", 3); String[][] validValues = { {"", ""}, {"0", "3 PLACES"}, {"1", "5 PLACES"} }; assertValidValues("anotherCT.number", validValues); } public void testDescriptionsListWithMultipleKeyAndOneValueInBlank() throws Exception { execute("List.viewDetail", "row=0"); assertExists("mixture.KEY"); String[][] validValues = { {"", ""}, {"[. .VERDE .]", "----------&-----VERDE:complicated"}, {"[.ROJO . .]", "------ROJO&----------:simple"} }; assertValidValues("mixture.KEY", validValues); setValue("mixture.KEY", "[. .VERDE .]"); execute("TypicalNotResetOnSave.save"); assertNoErrors(); assertMessage("Color modified successfully"); assertValue("mixture.KEY", "[. 
.VERDE .]"); setValue("mixture.KEY", ""); execute("TypicalNotResetOnSave.save"); assertNoErrors(); assertMessage("Color modified successfully"); assertValue("mixture.KEY", ""); } public void testFilterByString() throws Exception { assertLabelInList(1, "Name"); assertLabelInList(5, "Characteristic thing"); setConditionValues("", "", "", "", "3 places"); execute("List.filter"); assertListRowCount(1); assertValueInList(0, 1, "ROJO"); setConditionComparators( "=", "not_contains_comparator", "starts_comparator", "starts_comparator", "contains_comparator"); setConditionValues("", "ROJO", "", "", ""); execute("List.filter"); assertListNotEmpty(); setConditionComparators( "=", "not_contains_comparator", "starts_comparator", "starts_comparator", "contains_comparator"); setConditionValues("", "ROJO", "", "", "3 places"); execute("List.filter"); assertListRowCount(0); setConditionComparators( "=", "ends_comparator", "starts_comparator", "starts_comparator", "starts_comparator"); setConditionValues("", "O", "", "", ""); execute("List.filter"); assertListRowCount(2); assertValueInList(0, 1, "ROJO"); assertValueInList(1, 1, "NEGRO"); } }