/**
 * Base class for a fantasy game. Picks the starters recommended by the team's
 * strategy for a given date and trades for them through the game-specific API
 * implemented by subclasses.
 */
@Service
public abstract class FantasyGame {

  private static final Log LOG = LogFactory.getLog(FantasyGame.class);

  protected FantasyTeam fantasyTeam;

  public FantasyGame() {}

  /** Convenience wrapper: runs the trade for tomorrow's game date. */
  public void tradeForTomorrow() throws Exception {
    tradeForDate(DateUtil.getGameTomorrow());
  }

  /**
   * Picks the expected starters for the given date, attempts the trades, and
   * logs whether the resulting lineup matches the expectation.
   *
   * @param date game date to trade for
   * @throws Exception if the league cannot be loaded or a trade fails
   */
  public void tradeForDate(Date date) throws Exception {
    BbcLeague bbcLeague = getLeague(date);
    Starters expectedStarters = fantasyTeam.getStrategy().pickStarters(date, bbcLeague);
    Starters actualStarters = tradeForStarters(expectedStarters);
    boolean startersSet = actualStarters.equals(expectedStarters);
    // Use the class logger instead of System.out so output honors the logging configuration.
    LOG.info("starters look good - " + startersSet);
  }

  /** Loads the league state for the given date. */
  public abstract BbcLeague getLeague(Date date) throws IOException;

  /** Executes the trades needed to field the given starters; returns the actual lineup. */
  public abstract Starters tradeForStarters(Starters starters) throws IOException;
}
/** * Create on 28/02/2013 (12:46:06) * * @author Ana Andres */ public class SeeMessageSelectedColorsAction extends BaseAction { private static Log log = LogFactory.getLog(SeeMessageSelectedColorsAction.class); @Inject public Tab tab; public void execute() throws Exception { int[] selected = getTab().getSelected(); // test the old method Map[] selectedKeys = getTab().getSelectedKeys(); if (selected == null || selectedKeys == null) return; String m = ""; String o = ""; for (int i = 0; i < selected.length; i++) m += "[" + selected[i] + "]"; for (int i = 0; i < selectedKeys.length; i++) o += "[" + selectedKeys[i] + "]"; addMessage("color_selected_old", m); addMessage("color_selected_new", "'" + o + "'"); } public Tab getTab() { return tab; } public void setTab(Tab tab) { this.tab = tab; } }
/**
 * Emits one message at each level (warn, debug, error, error-with-exception)
 * and then verifies that the logger's debug-enabled flag matches expectations.
 *
 * @param expectedDebug whether debug logging is expected to be enabled
 */
private void logSomething(boolean expectedDebug) {
  final Log logger = LogFactory.getLog(Object.class);
  logger.warn("Warning message.");
  logger.debug("Debug message.");
  logger.error("Error message.");
  logger.error("Error with Exception.", new Exception("Test exception."));
  assertEquals(expectedDebug, logger.isDebugEnabled());
}
/**
 * Opens the IP location data file and reads its header (the offsets of the
 * first and last index records). On any failure {@code ipFile} is left null,
 * which disables IP lookup.
 */
public IPSeeker() {
  /* this.INSTALL_DIR=dir; this.IP_FILE=fileName; */
  ipCache = new HashMap<String, IPLocation>();
  loc = new IPLocation();
  buf = new byte[100];
  b4 = new byte[4];
  b3 = new byte[3];
  try {
    ipFile = new RandomAccessFile(IP_FILE, "r");
  } catch (FileNotFoundException e) {
    // If the file is not found, retry by scanning the install directory with a
    // lower-cased file name: some file systems are case sensitive, which can
    // make the IP data file unfindable under its configured name.
    String filename = new File(IP_FILE).getName().toLowerCase();
    File[] files = new File(INSTALL_DIR).listFiles();
    for (int i = 0; i < files.length; i++) {
      if (files[i].isFile()) {
        if (files[i].getName().toLowerCase().equals(filename)) {
          try {
            ipFile = new RandomAccessFile(files[i], "r");
          } catch (FileNotFoundException e1) {
            // Message (kept verbatim): "IP data file not found, IP display disabled".
            LogFactory.log("IP地址信息文件没有找到,IP显示功能将无法使用", Level.ERROR, e1);
            ipFile = null;
          }
          break;
        }
      }
    }
  }
  // If the file opened successfully, read the header: offsets of the first and
  // last index entries. -1 for either means the file is unusable.
  if (ipFile != null) {
    try {
      ipBegin = readLong4(0);
      ipEnd = readLong4(4);
      if (ipBegin == -1 || ipEnd == -1) {
        ipFile.close();
        ipFile = null;
      }
    } catch (IOException e) {
      // Message (kept verbatim): "IP data file malformed, IP display disabled".
      LogFactory.log("IP地址信息文件格式有错误,IP显示功能将无法使用", Level.ERROR, e);
      ipFile = null;
    }
  }
}
/** * A helper to load the native hadoop code i.e. libhadoop.so. This handles the fallback to either * the bundled libhadoop-Linux-i386-32.so or the the default java implementations where appropriate. */ public class NativeCodeLoader { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.util.NativeCodeLoader"); private static boolean nativeCodeLoaded = false; static { // Try to load native hadoop library and set fallback flag appropriately LOG.debug("Trying to load the custom-built native-hadoop library..."); try { System.loadLibrary("hadoop"); LOG.info("Loaded the native-hadoop library"); nativeCodeLoaded = true; } catch (Throwable t) { // Ignore failure to load LOG.debug("Failed to load native-hadoop with error: " + t); LOG.debug("java.library.path=" + System.getProperty("java.library.path")); } if (!nativeCodeLoaded) { LOG.warn( "Unable to load native-hadoop library for your platform... " + "using builtin-java classes where applicable"); } } /** * Check if native-hadoop code is loaded for this platform. * * @return <code>true</code> if native-hadoop is loaded, else <code>false</code> */ public static boolean isNativeCodeLoaded() { return nativeCodeLoaded; } /** * Return if native hadoop libraries, if present, can be used for this job. * * @param jobConf job configuration * @return <code>true</code> if native hadoop libraries, if present, can be used for this job; * <code>false</code> otherwise. */ public boolean getLoadNativeLibraries(JobConf jobConf) { return jobConf.getBoolean("hadoop.native.lib", true); } /** * Set if native hadoop libraries, if present, can be used for this job. * * @param jobConf job configuration * @param loadNativeLibraries can native hadoop libraries be loaded */ public void setLoadNativeLibraries(JobConf jobConf, boolean loadNativeLibraries) { jobConf.setBoolean("hadoop.native.lib", loadNativeLibraries); } }
// This happens very early - check it. static { try { log = LogFactory.getLog(Servlet.class); } catch (Exception ex) { System.err.println("Exception creating the logger"); System.err.println("Commons logging jar files in WEB-INF/lib/?"); System.err.println(ex.getMessage()); // ex.printStackTrace(System.err) ; } }
/**
 * Reads a 0-terminated string from the memory-mapped file starting at the
 * given offset, decoding it as GBK.
 *
 * @param offset byte offset where the string starts
 * @return the decoded string, or the empty string on error or empty string
 */
private String readString(int offset) {
  try {
    mbb.position(offset);
    int i = 0;
    // Read bytes until the 0 terminator, but stop at the end of buf: the
    // original unbounded loop could throw ArrayIndexOutOfBoundsException on
    // malformed data, which the catch below does not cover. Over-long strings
    // are truncated to buf's capacity instead of crashing.
    for (buf[i] = mbb.get(); buf[i] != 0 && i < buf.length - 1; buf[++i] = mbb.get())
      ;
    if (i != 0) return Util.getString(buf, 0, i, "GBK");
  } catch (IllegalArgumentException e) {
    LogFactory.log("", Level.ERROR, e);
  }
  return "";
}
/**
 * Reads a 0-terminated string from the random-access file starting at the
 * given offset, decoding it as GBK.
 *
 * @param offset byte offset where the string starts
 * @return the decoded string, or the empty string on error or empty string
 */
private String readString(long offset) {
  try {
    ipFile.seek(offset);
    int i = 0;
    // Read bytes until the 0 terminator, but stop at the end of buf: the
    // original unbounded loop could throw ArrayIndexOutOfBoundsException on
    // malformed data, which the IOException catch below does not cover.
    // Over-long strings are truncated to buf's capacity instead of crashing.
    for (buf[i] = ipFile.readByte(); buf[i] != 0 && i < buf.length - 1; buf[++i] = ipFile.readByte())
      ;
    if (i != 0) return Util.getString(buf, 0, i, "GBK");
  } catch (IOException e) {
    LogFactory.log("", Level.ERROR, e);
  }
  return "";
}
/**
 * JSP tag that renders a descriptions-list editor for a reference of the
 * current view, by preparing the reference metadata in the request and
 * including the generic reference editor JSP.
 *
 * @author Javier Paniza
 */
public class DescriptionsListTag extends TagSupport {

  private static Log log = LogFactory.getLog(DescriptionsListTag.class);

  // Name of the reference (in the current view) to render.
  private String reference;

  public int doStartTag() throws JspException {
    try {
      HttpServletRequest request = (HttpServletRequest) pageContext.getRequest();
      ModuleContext context = (ModuleContext) request.getSession().getAttribute("context");
      // Default to the standard view object name when none is given.
      String viewObject = request.getParameter("viewObject");
      viewObject = (viewObject == null || viewObject.equals("")) ? "xava_view" : viewObject;
      View view = (View) context.get(request, viewObject);
      // Clone so renaming the reference does not mutate the shared metadata.
      MetaReference metaReference = view.getMetaReference(reference).cloneMetaReference();
      metaReference.setName(reference);
      String prefix = request.getParameter("propertyPrefix");
      prefix = prefix == null ? "" : prefix;
      String application = request.getParameter("application");
      String module = request.getParameter("module");
      // Publish the reference metadata under a decorated key for the included JSP.
      String referenceKey = Ids.decorate(application, module, prefix + reference);
      request.setAttribute(referenceKey, metaReference);
      String editorURL =
          "reference.jsp?referenceKey="
              + referenceKey
              + "&onlyEditor=true&frame=false&composite=false&descriptionsList=true";
      // Portlet deployments resolve JSPs from a different base path.
      String editorPrefix = Module.isPortlet() ? "/WEB-INF/jsp/xava/" : "/xava/";
      try {
        pageContext.include(editorPrefix + editorURL);
      } catch (ServletException ex) {
        // Prefer the root cause when available; it carries the real error.
        Throwable cause = ex.getRootCause() == null ? ex : ex.getRootCause();
        log.error(cause.getMessage(), cause);
        // Fall back to a placeholder editor rather than breaking the page.
        pageContext.include(editorPrefix + "editors/notAvailableEditor.jsp");
      } catch (Exception ex) {
        log.error(ex.getMessage(), ex);
        pageContext.include(editorPrefix + "editors/notAvailableEditor.jsp");
      }
    } catch (Exception ex) {
      log.error(ex.getMessage(), ex);
      throw new JspException(XavaResources.getString("descriptionsList_tag_error", reference));
    }
    return SKIP_BODY;
  }

  public String getReference() {
    return reference;
  }

  public void setReference(String property) {
    this.reference = property;
  }
}
/**
 * Demo entry point: bootstraps the Spring context and logs the total sales
 * for a few sample products.
 *
 * @param args command-line arguments (ignored)
 * @throws IOException declared for callers; propagated from the runtime
 */
public static void main(String[] args) throws IOException {
  Log log = LogFactory.getLog(Client.class);
  ApplicationContext context = new AnnotationConfigApplicationContext(ClientConfig.class);
  SalesCalculator salesCalculator = context.getBean(SalesCalculator.class);
  // Idiom fix: Java-style array declaration/initializer instead of C-style "String args[]".
  String[] products = {"Apple iPad", "Apple iPod", "Apple macBook"};
  for (String productName : products) {
    BigDecimal total = salesCalculator.totalSalesForProduct(productName);
    log.info("total sales for " + productName + " = $" + total);
  }
}
/**
 * Class that holds a 64 bit salt value for crypto operations.
 *
 * <p>This class tries to use the SecureRandom class to initialize the salt value. If SecureRandom
 * is not available the class Random is used.
 *
 * @author Jochen Katz
 * @version 1.0
 */
class Salt {
  private long salt;
  private static Salt instance = null;
  private static final LogAdapter logger = LogFactory.getLogger(Salt.class);

  /** Default constructor, initializes the salt to a random value. */
  protected Salt() {
    byte[] rnd = new byte[8];
    try {
      SecureRandom sr = SecureRandom.getInstance("SHA1PRNG");
      sr.nextBytes(rnd);
    } catch (NoSuchAlgorithmException nsae) {
      logger.warn("Could not use SecureRandom. Using Random instead.");
      Random r = new Random();
      r.nextBytes(rnd);
    }
    // Fold all 8 random bytes into the 64 bit salt. The previous code seeded
    // with rnd[0] and then mixed rnd[0]..rnd[6] again, so rnd[7] was never
    // used and entropy was lost.
    salt = rnd[0];
    for (int i = 1; i < 8; i++) {
      salt = (salt * 256) + ((int) rnd[i]) + 128;
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Initialized Salt to " + Long.toHexString(salt) + ".");
    }
  }

  /**
   * Get an initialized Salt object.
   *
   * <p>Synchronized so concurrent first calls cannot create two instances
   * (consistent with the synchronized {@link #getNext()}).
   *
   * @return the Salt object
   */
  public static synchronized Salt getInstance() {
    if (instance == null) {
      instance = new Salt();
    }
    return instance;
  }

  /**
   * Get the next value of the salt.
   *
   * @return previous value increased by one.
   */
  public synchronized long getNext() {
    return salt++;
  }
}
/**
 * Reads the 4 IP address bytes at the given file offset into {@code ip}.
 * The file stores addresses little-endian; the bytes are reversed in place
 * so the result is big-endian.
 *
 * @param offset file offset of the address record
 * @param ip destination buffer (first 4 bytes are written and swapped)
 */
private void readIP(long offset, byte[] ip) {
  try {
    ipFile.seek(offset);
    ipFile.readFully(ip);
    // Reverse bytes 0..3 in place (little-endian -> big-endian).
    for (int i = 0; i < 2; i++) {
      byte swap = ip[i];
      ip[i] = ip[3 - i];
      ip[3 - i] = swap;
    }
  } catch (IOException e) {
    LogFactory.log("", Level.ERROR, e);
  }
}
// Clase UsuariosMunicipio // Lee el usuario. public class UsuariosMunicipio { private static Log log = LogFactory.getLog(UsuariosMunicipio.class); private static final ThreadLocal municipioUsuario = new ThreadLocal(); private static final ThreadLocal municipioUsuarioInfo = new ThreadLocal(); public static String getMunicipioUsuario() { return (String) municipioUsuario.get(); } public static UserInfo getMunicipioUsuarioInfo() { UserInfo userInfo = (UserInfo) municipioUsuarioInfo.get(); if (userInfo == null) userInfo = new UserInfo(); userInfo.setId(getMunicipioUsuario()); return userInfo; } public static void setMunicipioUsuario(String userName) { municipioUsuario.set(userName); municipioUsuarioInfo.set(null); } public static void setCurrentUserInfo(UserInfo userInfo) { municipioUsuario.set(userInfo.getId()); municipioUsuarioInfo.set(userInfo); } public static void setCurrent(HttpServletRequest request) { Object rundata = request.getAttribute("rundata"); String portalUser = (String) request.getSession().getAttribute("xava.portal.user"); String webUser = (String) request.getSession().getAttribute("xava.user"); String user = portalUser == null ? webUser : portalUser; if (Is.emptyString(user) && rundata != null) { PropertiesManager pmRundata = new PropertiesManager(rundata); try { Object jetspeedUser = pmRundata.executeGet("user"); PropertiesManager pmUser = new PropertiesManager(jetspeedUser); user = (String) pmUser.executeGet("userName"); } catch (Exception ex) { log.warn(XavaResources.getString("warning_get_user"), ex); user = null; } } municipioUsuario.set(user); request.getSession().setAttribute("xava.user", user); municipioUsuarioInfo.set(request.getSession().getAttribute("xava.portal.userinfo")); } }
/**
 * Handles game timeout events for the V086 protocol. When the timeout belongs
 * to this client's own user, the pending messages are resent; otherwise the
 * event is only logged.
 */
public class GameTimeoutAction implements V086GameEventHandler {
  private static Log log = LogFactory.getLog(GameTimeoutAction.class);
  private static final String DESC = "GameTimeoutAction";
  private static final GameTimeoutAction INSTANCE = new GameTimeoutAction();

  /** Returns the shared singleton instance. */
  public static GameTimeoutAction getInstance() {
    return INSTANCE;
  }

  // Number of events this handler has processed.
  private int handledCount = 0;

  private GameTimeoutAction() {}

  public int getHandledEventCount() {
    return handledCount;
  }

  public String toString() {
    return DESC;
  }

  public void handleEvent(GameEvent event, V086Controller.V086ClientHandler clientHandler) {
    handledCount++;
    GameTimeoutEvent timeoutEvent = (GameTimeoutEvent) event;
    KailleraUser player = timeoutEvent.getUser();
    KailleraUser user = clientHandler.getUser();
    if (player.equals(user)) {
      // Our own timeout: the server missed our messages, so resend them.
      log.debug(
          user
              + " received timeout event "
              + timeoutEvent.getTimeoutNumber()
              + " for "
              + timeoutEvent.getGame()
              + ": resending messages...");
      clientHandler.resend(timeoutEvent.getTimeoutNumber());
    } else {
      // Another player's timeout: nothing to do, just record it.
      log.debug(
          user
              + " received timeout event "
              + timeoutEvent.getTimeoutNumber()
              + " from "
              + player
              + " for "
              + timeoutEvent.getGame());
    }
  }
}
/**
 * Access log for a tri-state system property.
 *
 * <p>The override value is first mapped to a log level, interpreting it as a
 * range between BRIEF, VERBOSE and SILENT.
 *
 * <p>An override < 0 means the logging configuration should not be
 * overridden; the level passed to the factory's createLog method is null in
 * that case.
 *
 * <p>Note that if oldLogName is null and old logging is on, the returned
 * LogStreamLog ignores the override parameter — the log never logs messages.
 * This lets new logs that only write to Loggers do nothing when old logging
 * is active.
 *
 * <p>Do not call getLog multiple times on the same logger name: this is an
 * internal API and no checks are made against duplicate logs for one logger.
 */
public static Log getLog(String loggerName, String oldLogName, int override) {
  // Default to the finest level, then narrow it down by the override ranges.
  Level level = Level.FINEST;
  if (override < 0) {
    level = null;
  } else if (override == LogStream.SILENT) {
    level = Level.OFF;
  } else if ((override > LogStream.SILENT) && (override <= LogStream.BRIEF)) {
    level = BRIEF;
  } else if ((override > LogStream.BRIEF) && (override <= LogStream.VERBOSE)) {
    level = VERBOSE;
  }
  return logFactory.createLog(loggerName, oldLogName, level);
}
/** OutputFormat that writes records as BSON files, committing via MongoOutputCommiter. */
public class BSONFileOutputFormat<K, V> extends OutputFormat<K, V> {

  private static final Log LOG = LogFactory.getLog(BSONFileOutputFormat.class);

  public BSONFileOutputFormat() {}

  /** No output specs need validating for BSON file output. */
  @Override
  public void checkOutputSpecs(final JobContext context) {}

  @Override
  public OutputCommitter getOutputCommitter(final TaskAttemptContext context) {
    return new MongoOutputCommiter();
  }

  @Override
  public RecordWriter<K, V> getRecordWriter(final TaskAttemptContext context) {
    return new BSONFileRecordWriter(context);
  }
}
public class GameDesynchAction implements V086GameEventHandler { private static Log log = LogFactory.getLog(GameDesynchAction.class); private static final String desc = "GameDesynchAction"; // $NON-NLS-1$ private static GameDesynchAction singleton = new GameDesynchAction(); public static GameDesynchAction getInstance() { return singleton; } private int handledCount = 0; private GameDesynchAction() {} public int getHandledEventCount() { return handledCount; } public String toString() { return desc; } public void handleEvent(GameEvent event, V086Controller.V086ClientHandler clientHandler) { handledCount++; GameDesynchEvent desynchEvent = (GameDesynchEvent) event; try { clientHandler.send( new GameChat_Notification( clientHandler.getNextMessageNumber(), EmuLang.getString("GameDesynchAction.DesynchDetected"), desynchEvent.getMessage())); // $NON-NLS-1$ // if (clientHandler.getUser().getStatus() == KailleraUser.STATUS_PLAYING) // clientHandler.getUser().dropGame(); } catch (MessageFormatException e) { log.error( "Failed to contruct GameChat_Notification message: " + e.getMessage(), e); // $NON-NLS-1$ } // catch (DropGameException e) // { // log.error("Failed to drop game during desynch: " + e.getMessage(), e); // } } }
/** @author jclopez */ public class AccountEntryValidator implements Validator { private static final Log log = LogFactory.getLog(EuroValidator.class); /** */ public void validate(FacesContext context, UIComponent component, Object value) throws ValidatorException { log.info("validate - value = " + value); if (value != null) { // Check if value is a BigDecimal if (!(value instanceof BigDecimal)) { log.info("validate - value is not a BigDecimal (" + value.getClass().getName() + ")"); throw new ValidatorException( new FacesMessage("Las cantidades monetarias deben ser de tipo BigDecimal")); } // Check if it has no more than 2 decimal digits BigDecimal bd = (BigDecimal) value; if (bd.scale() > 2) { log.info("validate - value has more than 2 decimals (" + value + ")"); throw new ValidatorException( new FacesMessage("Las cantidades monetarias no pueden tener mas de dos decimales")); } AccountEntryBean bean = (AccountEntryBean) FacesUtils.getBean("accountEntryBean"); AccountEntryType type = bean.getType(); AccountEntryGroup group = type.getGroup(); if (group.getId() == ConfigurationUtil.getDefault().getCostId()) { if (bd.signum() != -1) { log.info("validate - value cost is negative (" + value + ")"); throw new ValidatorException(new FacesMessage("La cantidad debe ser negativa")); } } if (group.getId() == ConfigurationUtil.getDefault().getIncomeId()) { if (bd.signum() != 1) { log.info("validate - value incom is positive (" + value + ")"); throw new ValidatorException(new FacesMessage("La cantidad debe ser positiva")); } } } } }
/**
 * Given a partial place name, returns every IP range record whose location
 * (country or area) contains the substring.
 *
 * @param s place-name substring to search for
 * @return List of matching IPEntry records (empty on error or no match)
 */
public List<IPEntry> getIPEntries(String s) {
  List<IPEntry> ret = new ArrayList<IPEntry>();
  try {
    // Map the IP data file into memory on first use; the file is little-endian.
    if (mbb == null) {
      FileChannel fc = ipFile.getChannel();
      mbb = fc.map(FileChannel.MapMode.READ_ONLY, 0, ipFile.length());
      mbb.order(ByteOrder.LITTLE_ENDIAN);
    }
    int endOffset = (int) ipEnd;
    // Walk the fixed-size index records between ipBegin and ipEnd.
    for (int offset = (int) ipBegin + 4; offset <= endOffset; offset += IP_RECORD_LENGTH) {
      int temp = readInt3(offset);
      if (temp != -1) {
        IPLocation ipLoc = getIPLocation(temp);
        // If the location contains the substring, add this record to the
        // result list; otherwise continue scanning.
        if (ipLoc.getCountry().indexOf(s) != -1 || ipLoc.getArea().indexOf(s) != -1) {
          IPEntry entry = new IPEntry();
          entry.country = ipLoc.getCountry();
          entry.area = ipLoc.getArea();
          // Start IP of the range.
          readIP(offset - 4, b4);
          entry.beginIp = Util.getIpStringFromBytes(b4);
          // End IP of the range.
          readIP(temp, b4);
          entry.endIp = Util.getIpStringFromBytes(b4);
          // Record the matching entry.
          ret.add(entry);
        }
      }
    }
  } catch (IOException e) {
    LogFactory.log("", Level.ERROR, e);
  }
  return ret;
}
/** @author Javier Paniza */ public abstract class ModelMapping implements java.io.Serializable { private static Log log = LogFactory.getLog(ModelMapping.class); private static boolean codeGenerationTime; private static boolean codeGenerationTimeObtained = false; private MetaComponent metaComponent; private String table; private Map propertyMappings = new HashMap(); private Map referenceMappings; private Collection modelProperties = new ArrayList(); // of String private Collection tableColumns = new ArrayList(); // of String private Collection referenceMappingsWithConverter; // of ReferenceMapping private boolean databaseMetadataLoaded = false; private boolean supportsSchemasInDataManipulation = true; private boolean supportsYearFunction = false; private boolean supportsMonthFunction = false; private boolean supportsTranslateFunction = false; private boolean referencePropertyWithFormula = false; public abstract String getModelName() throws XavaException; public abstract MetaModel getMetaModel() throws XavaException; /** Util specially to find out the type of properties that are not in model, only in mapping. */ public Class getType(String propertyName) throws XavaException { try { return getMetaModel().getMetaProperty(propertyName).getType(); } catch (ElementNotFoundException ex) { // Try to obtain it from primary key if (!(getMetaModel() instanceof MetaEntity)) return java.lang.Object.class; throw ex; } } public String getTable() { // Change this if by polymorphism ? if (isCodeGenerationTime()) return table; if (XavaPreferences.getInstance().isJPAPersistence() && getSchema() == null && !Is.emptyString(XPersistence.getDefaultSchema())) { return XPersistence.getDefaultSchema() + "." + table; } else if (XavaPreferences.getInstance().isHibernatePersistence() && getSchema() == null && !Is.emptyString(XHibernate.getDefaultSchema())) { return XHibernate.getDefaultSchema() + "." 
+ table; } return table; } private static boolean isCodeGenerationTime() { if (!codeGenerationTimeObtained) { codeGenerationTimeObtained = true; try { // Class.forName("CodeGenerator"); ClassLoaderUtil.forName(ModelMapping.class, "CodeGenerator"); codeGenerationTime = true; } catch (Exception ex) { codeGenerationTime = false; } } return codeGenerationTime; } public void setTable(String tabla) { this.table = tabla; } public String getSchema() { int idx = table.indexOf('.'); if (idx < 0) return null; return table.substring(0, idx); } public String getUnqualifiedTable() { int idx = table.indexOf('.'); if (idx < 0) return table; return table.substring(idx + 1); } public String getTableToQualifyColumn() { return supportsSchemasInDataManipulation() ? getTable() : getUnqualifiedTable(); } public void addPropertyMapping(PropertyMapping propertyMapping) throws XavaException { propertyMappings.put(propertyMapping.getProperty(), propertyMapping); modelProperties.add(propertyMapping.getProperty()); // To keep order tableColumns.add(propertyMapping.getColumn()); if (propertyMapping.hasFormula() && !getMetaModel().isAnnotatedEJB3()) { propertyMapping.getMetaProperty().setReadOnly(true); } } public void addReferenceMapping(ReferenceMapping referenceMapping) throws XavaException { if (referenceMappings == null) referenceMappings = new HashMap(); referenceMappings.put(referenceMapping.getReference(), referenceMapping); referenceMapping.setContainer(this); } /** @return Not null */ public ReferenceMapping getReferenceMapping(String name) throws XavaException, ElementNotFoundException { ReferenceMapping r = referenceMappings == null ? 
null : (ReferenceMapping) referenceMappings.get(name); if (r == null) { throw new ElementNotFoundException("reference_mapping_not_found", name, getModelName()); } return r; } /** @return Not null */ public PropertyMapping getPropertyMapping(String name) throws XavaException, ElementNotFoundException { int i = name.indexOf('.'); if (i >= 0) { String rName = name.substring(0, i); String pName = name.substring(i + 1); if (isReferenceNameInReferenceMappings(rName)) { return getReferenceMapping(rName).getReferencedMapping().getPropertyMapping(pName); } else { // by embedded references: address.city -> address_city return getPropertyMapping(name.replace(".", "_")); } } PropertyMapping p = propertyMappings == null ? null : (PropertyMapping) propertyMappings.get(name); if (p == null) { throw new ElementNotFoundException("property_mapping_not_found", name, getModelName()); } return p; } private boolean isReferenceNameInReferenceMappings(String referenceName) { Collection<ReferenceMapping> col = getReferenceMappings(); for (ReferenceMapping rm : col) if (rm.getReference().equals(referenceName)) return true; return false; } /** * In the order that they was added. * * @return Collection of <tt>String</tt>. */ public Collection getModelProperties() { return modelProperties; } /** * In the order that they was added. * * @return Collection of <tt>String</tt>. 
*/ public Collection getColumns() { return tableColumns; } public String getKeyColumnsAsString() throws XavaException { StringBuffer r = new StringBuffer(); Collection columns = new HashSet(); for (Iterator it = getMetaModel().getAllKeyPropertiesNames().iterator(); it.hasNext(); ) { String pr = (String) it.next(); String column = getColumn(pr); if (columns.contains(column)) continue; columns.add(column); r.append(column); r.append(' '); } return r.toString().trim(); } private boolean supportsSchemasInDataManipulation() { loadDatabaseMetadata(); return supportsSchemasInDataManipulation; } /** Wraps the column name with the SQL function for extracting the year from a date. */ public String yearSQLFunction(String column) { if (supportsYearFunction()) return "year(" + column + ")"; return "extract (year from " + column + ")"; } /** Wraps the column name with the SQL function for extracting the month from a date. */ public String monthSQLFunction(String column) { if (supportsMonthFunction()) return "month(" + column + ")"; return "extract (month from " + column + ")"; } /** * To ignore accents: just to search 'cami�n' or 'camion' * * <p>Good performance using 'translate' but is very slow when it use 'replace...' 
* * @since v4m6 */ public String translateSQLFunction(String column) { if (supportsTranslateFunction()) return "translate(" + column + ",'aeiouAEIOU','áéíóúÁÉÍÓÚ')"; return "replace(replace(replace(replace(replace(replace(replace(replace(replace(replace(" + column + ", 'Ú', 'U'), 'ú', 'u'), 'Ó', 'O'), 'ó', 'o'), 'Í', 'I'), " + "'í', 'i'), 'É', 'E'), 'é', 'e'), 'Á', 'A'), 'á', 'a')"; } private boolean supportsYearFunction() { loadDatabaseMetadata(); return supportsYearFunction; } private boolean supportsMonthFunction() { loadDatabaseMetadata(); return supportsMonthFunction; } /** @since v4m6 */ private boolean supportsTranslateFunction() { loadDatabaseMetadata(); return supportsTranslateFunction; } private void loadDatabaseMetadata() { if (!databaseMetadataLoaded) { String componentName = "UNKNOWN"; Connection con = null; try { componentName = getMetaComponent().getName(); con = DataSourceConnectionProvider.getByComponent(componentName).getConnection(); DatabaseMetaData metaData = con.getMetaData(); supportsSchemasInDataManipulation = metaData.supportsSchemasInDataManipulation(); Collection timeDateFunctions = Strings.toCollection(metaData.getTimeDateFunctions().toUpperCase()); // // another solution instead of the use of 'if' would be to use a xml with // the information of the functions from each BBDD if ("DB2 UDB for AS/400".equals(metaData.getDatabaseProductName()) || "Oracle".equals(metaData.getDatabaseProductName()) || "PostgresSQL".equals(metaData.getDatabaseProductName())) { supportsTranslateFunction = true; } if ("Oracle".equals(metaData.getDatabaseProductName()) || "PostgreSQL".equals(metaData.getDatabaseProductName())) { supportsYearFunction = supportsMonthFunction = false; } else { supportsYearFunction = timeDateFunctions.contains("YEAR"); supportsMonthFunction = timeDateFunctions.contains("MONTH"); } databaseMetadataLoaded = true; } catch (Exception ex) { log.warn(XavaResources.getString("load_database_metadata_warning")); } finally { try { if (con != 
null) { con.close(); } } catch (SQLException e) { log.warn(XavaResources.getString("close_connection_warning")); } } } } public String getQualifiedColumn(String modelProperty) throws XavaException { PropertyMapping propertyMapping = (PropertyMapping) propertyMappings.get(modelProperty); if (propertyMapping != null && propertyMapping.hasFormula()) return getColumn(modelProperty); String tableColumn = getTableColumn(modelProperty, true); if (Is.emptyString(tableColumn)) return "'" + modelProperty + "'"; if (referencePropertyWithFormula) { referencePropertyWithFormula = false; return tableColumn; } // for calculated fields or created by multiple converter if (modelProperty.indexOf('.') >= 0) { if (tableColumn.indexOf('.') < 0) return tableColumn; String reference = modelProperty.substring(0, modelProperty.lastIndexOf('.')); if (tableColumn.startsWith(getTableToQualifyColumn() + ".")) { String member = modelProperty.substring(modelProperty.lastIndexOf('.') + 1); if (getMetaModel().getMetaReference(reference).getMetaModelReferenced().isKey(member)) return tableColumn; } // The next code uses the alias of the table instead of its name. In order to // support multiple references to the same model if (reference.indexOf('.') >= 0) { if (getMetaModel().getMetaProperty(modelProperty).isKey()) { reference = reference.substring(0, reference.lastIndexOf('.')); } reference = reference.replaceAll("\\.", "_"); } return "T_" + reference + tableColumn.substring(tableColumn.lastIndexOf('.')); } else { return getTableToQualifyColumn() + "." + tableColumn; } } /** Support the use of references with dots, this is: myreference.myproperty. 
*/ public String getColumn(String modelProperty) throws ElementNotFoundException, XavaException { return getTableColumn(modelProperty, false); } private String getTableColumn(String modelProperty, boolean qualifyReferenceMappingColumn) throws XavaException { PropertyMapping propertyMapping = (PropertyMapping) propertyMappings.get(modelProperty); if (propertyMapping == null) { int idx = modelProperty.indexOf('.'); if (idx >= 0) { String referenceName = modelProperty.substring(0, idx); String propertyName = modelProperty.substring(idx + 1); if (getMetaModel().getMetaReference(referenceName).isAggregate() && !Strings.firstUpper(referenceName).equals(getMetaModel().getContainerModelName())) { propertyMapping = (PropertyMapping) propertyMappings.get(referenceName + "_" + propertyName); if (propertyMapping == null) { int idx2 = propertyName.indexOf('.'); if (idx2 >= 0) { String referenceName2 = propertyName.substring(0, idx2); String propertyName2 = propertyName.substring(idx2 + 1); return getTableColumn( referenceName + "_" + referenceName2 + "." + propertyName2, qualifyReferenceMappingColumn); } else { throw new ElementNotFoundException( "property_mapping_not_found", referenceName + "_" + propertyName, getModelName()); } } return propertyMapping.getColumn(); } ReferenceMapping referenceMapping = getReferenceMapping(referenceName); if (referenceMapping.hasColumnForReferencedModelProperty(propertyName)) { if (qualifyReferenceMappingColumn) { return getTableToQualifyColumn() + "." 
// NOTE(review): this chunk begins mid-method — the lines below continue a method whose
// signature lies above this view; it appears to resolve a model property to its database
// column (or SQL formula), possibly through a reference mapping. TODO confirm against the
// full method signature.
+ referenceMapping.getColumnForReferencedModelProperty(propertyName);
} else {
  return referenceMapping.getColumnForReferencedModelProperty(propertyName);
}
} else {
  // Property reached through a reference: delegate to the referenced model's own mapping.
  ModelMapping referencedMapping = referenceMapping.getReferencedMapping();
  String tableName = referencedMapping.getTableToQualifyColumn();
  // A '.' in the name means the property is nested at least two levels deep.
  boolean secondLevel = propertyName.indexOf('.') >= 0;
  String columnName = referencedMapping.getTableColumn(propertyName, secondLevel);
  boolean hasFormula = referencedMapping.getPropertyMapping(propertyName).hasFormula();
  if (qualifyReferenceMappingColumn && !secondLevel && !hasFormula) {
    return tableName + "." + columnName;
  } else if (hasFormula) {
    // Formula-mapped property: qualify the column names used inside the formula.
    String formula = referencedMapping.getPropertyMapping(propertyName).getFormula();
    referencePropertyWithFormula = true;
    return qualifyFormulaWithReferenceName(formula, referencedMapping.getModelName(), modelProperty);
  } else {
    return columnName;
  }
}
}
throw new ElementNotFoundException("property_mapping_not_found", modelProperty, getModelName());
}
if (propertyMapping.hasFormula()) return propertyMapping.getFormula();
return propertyMapping.getColumn();
}

/**
 * Returns the converter for the given property.
 *
 * @return null if the property exists but has no converter.
 * @exception ElementNotFoundException If property does not exist.
 * @exception XavaException Any problem.
 */
public IConverter getConverter(String modelProperty) throws ElementNotFoundException, XavaException {
  return getPropertyMapping(modelProperty).getConverter();
}

/**
 * Returns the multiple converter for the given property.
 *
 * @return null if the property exists but has no multiple converter.
 * @exception ElementNotFoundException If property does not exist.
 * @exception XavaException Any problem.
 */
public IMultipleConverter getMultipleConverter(String modelProperty) throws ElementNotFoundException, XavaException {
  return getPropertyMapping(modelProperty).getMultipleConverter();
}

/** True if the property exists and has a converter; false otherwise (including lookup failure). */
public boolean hasConverter(String propertyName) {
  try {
    return getPropertyMapping(propertyName).hasConverter();
  } catch (XavaException ex) {
    return false;
  }
}

public MetaComponent getMetaComponent() {
  return metaComponent;
}

/** Sets the owning component and (re)applies default converters to all property mappings. */
public void setMetaComponent(MetaComponent componente) throws XavaException {
  this.metaComponent = componente;
  setupDefaultConverters();
}

/**
 * Changes the properties inside ${ } by the database qualified (schema + table) columns.
 * If the name inside ${ } is a model name, it is changed by the table name.
 *
 * <p>For example, {@code select ${number}, ${name} from ${Tercero}} becomes
 * {@code select G4GENBD.GENTGER.TGRCOD, G4GENBD.GENTGER.TGRDEN from G4GENBD.GENTGER}.
 */
public String changePropertiesByColumns(String source) throws XavaException {
  return changePropertiesByColumns(source, true);
}

/**
 * Changes the properties inside ${ } by the database columns without table/schema prefix.
 * If the name inside ${ } is a model name, it is changed by the table name.
 *
 * <p>For example, {@code select ${number}, ${name} from ${Tercero}} becomes
 * {@code select TGRCOD, TGRDEN from G4GENBD.GENTGER}.
 */
public String changePropertiesByNotQualifiedColumns(String source) throws XavaException {
  return changePropertiesByColumns(source, false);
}

// Core ${...} substitution: scans source for ${token} and replaces each token with its
// table name (when it is a model name) or its (optionally qualified) column.
private String changePropertiesByColumns(String source, boolean qualified) throws XavaException {
  StringBuffer r = new StringBuffer(source);
  int i = r.toString().indexOf("${");
  int f = 0;
  while (i >= 0) {
    f = r.toString().indexOf("}", i + 2);
    if (f < 0) break; // unbalanced ${ — leave the remainder untouched
    String property = r.substring(i + 2, f);
    String column = "0"; // placeholder kept when the property is calculated (no column)
    if (!getMetaModel().isCalculated(property)) {
      column = Strings.isModelName(property) ? getTable(property) : qualified ? getQualifiedColumn(property) : getColumn(property);
    }
    r.replace(i, f + 1, column);
    i = r.toString().indexOf("${"); // rescan from the start; replacement shifts offsets
  }
  return r.toString();
}

/** @since 4.1 */
private String getTable(String name) {
  return MetaComponent.get(name).getEntityMapping().getTable();
}

// Replaces each ${property} with its CMP attribute access path ("o.xxx" or "o._Xxx"):
// the underscore form is used for qualified properties and converter-backed properties.
public String changePropertiesByCMPAttributes(String source) throws XavaException {
  StringBuffer r = new StringBuffer(source);
  int i = r.toString().indexOf("${");
  int f = 0;
  while (i >= 0) {
    f = r.toString().indexOf("}", i + 2);
    if (f < 0) break;
    String property = r.substring(i + 2, f);
    String cmpAttribute = null;
    if (property.indexOf('.') >= 0) {
      // Qualified property: flatten "a.b" into an "_A_b"-style attribute name.
      cmpAttribute = "o._" + Strings.firstUpper(Strings.change(property, ".", "_"));
    } else {
      MetaProperty metaProperty = getMetaModel().getMetaProperty(property);
      if (metaProperty.getMapping().hasConverter()) {
        cmpAttribute = "o._" + Strings.firstUpper(property);
      } else {
        cmpAttribute = "o." + property;
      }
    }
    r.replace(i, f + 1, cmpAttribute);
    i = r.toString().indexOf("${");
  }
  return r.toString();
}

public boolean hasPropertyMapping(String memberName) {
  return propertyMappings.containsKey(memberName);
}

// Applies the default converter to every property mapping (run when the component is set).
private void setupDefaultConverters() throws XavaException {
  Iterator it = propertyMappings.values().iterator();
  while (it.hasNext()) {
    PropertyMapping propertyMapping = (PropertyMapping) it.next();
    propertyMapping.setDefaultConverter();
  }
}

public boolean hasReferenceMapping(MetaReference metaReference) {
  if (referenceMappings == null) return false;
  return referenceMappings.containsKey(metaReference.getName());
}

/** True if the column mapped for the given referenced property is also mapped by a plain property. */
public boolean isReferenceOverlappingWithSomeProperty(String reference, String propertiesOfReference) throws XavaException {
  String column = getReferenceMapping(reference).getColumnForReferencedModelProperty(propertiesOfReference);
  return containsColumn(getColumns(), column);
}

/** True if any detail column of the reference overlaps a property column not derived from the reference itself. */
public boolean isReferenceOverlappingWithSomeProperty(String reference) throws XavaException {
  Iterator it = getReferenceMapping(reference).getDetails().iterator();
  while (it.hasNext()) {
    ReferenceMappingDetail d = (ReferenceMappingDetail) it.next();
    if (containsColumn(getColumns(), d.getColumn())) {
      String property = getMappingForColumn(d.getColumn()).getProperty();
      // Properties named "<reference>_xxx" belong to the reference; not a true overlap.
      if (!property.startsWith(reference + "_")) {
        return true;
      }
    }
  }
  return false;
}

/** Splits "reference.property" and delegates; false for unqualified names. */
public boolean isReferencePropertyOverlappingWithSomeProperty(String qualifiedProperty) throws XavaException {
  int idx = qualifiedProperty.indexOf('.');
  if (idx < 0) return false;
  String ref = qualifiedProperty.substring(0, idx);
  String pr = qualifiedProperty.substring(idx + 1);
  return isReferenceOverlappingWithSomeProperty(ref, pr);
}

/**
 * Returns the plain property whose column overlaps the given referenced property's column.
 *
 * @throws XavaException If it does not have an overlapped property, or any other problem.
 */
public String getOverlappingPropertyForReference(String reference, String propertyOfReference) throws XavaException {
  String column = getReferenceMapping(reference).getColumnForReferencedModelProperty(propertyOfReference);
  if (propertyMappings == null) {
    throw new XavaException("reference_property_not_overlapped", propertyOfReference, reference);
  }
  Iterator it = propertyMappings.values().iterator();
  while (it.hasNext()) {
    PropertyMapping mapping = (PropertyMapping) it.next();
    if (column.equalsIgnoreCase(mapping.getColumn())) return mapping.getProperty();
  }
  throw new XavaException("reference_property_not_overlapped", propertyOfReference, reference);
}

/** @return Collection of <tt>String</tt>, never null. */
public Collection getOverlappingPropertiesOfReference(String reference) throws XavaException {
  Collection overlappingPropertiesOfReference = new ArrayList();
  Iterator it = getReferenceMapping(reference).getDetails().iterator();
  while (it.hasNext()) {
    ReferenceMappingDetail d = (ReferenceMappingDetail) it.next();
    if (containsColumn(getColumns(), d.getColumn())) {
      String property = getMappingForColumn(d.getColumn()).getProperty();
      if (!property.startsWith(reference + "_")) {
        overlappingPropertiesOfReference.add(d.getReferencedModelProperty());
      }
    }
  }
  return overlappingPropertiesOfReference;
}

// Case-insensitive membership test; exact containment first as a fast path.
private boolean containsColumn(Collection columns, String column) {
  if (columns.contains(column)) return true;
  for (Iterator it = columns.iterator(); it.hasNext(); ) {
    if (((String) it.next()).equalsIgnoreCase(column)) return true;
  }
  return false;
}

// Reverse lookup: the property mapping that maps the given column (case-insensitive).
private PropertyMapping getMappingForColumn(String column) throws XavaException {
  if (propertyMappings == null) {
    throw new ElementNotFoundException("mapping_not_found_no_property_mappings", column);
  }
  Iterator it = propertyMappings.values().iterator();
  while (it.hasNext()) {
    PropertyMapping propertyMapping = (PropertyMapping) it.next();
    if (propertyMapping.getColumn().equalsIgnoreCase(column)) {
      return propertyMapping;
    }
  }
  throw new ElementNotFoundException("mapping_for_column_not_found", column);
}

// CMP attribute name for a column: flattened name, or "_Xxx" form when a converter exists.
String getCMPAttributeForColumn(String column) throws XavaException {
  PropertyMapping mapping = getMappingForColumn(column);
  if (!mapping.hasConverter()) return Strings.change(mapping.getProperty(), ".", "_");
  return "_" + Strings.change(Strings.firstUpper(mapping.getProperty()), ".", "_");
}

private Collection getPropertyMappings() {
  return propertyMappings.values();
}

/** Property mappings whose property does not exist in the model (underscored names excluded). */
public Collection getPropertyMappingsNotInModel() throws XavaException {
  Collection names = new ArrayList(getModelProperties());
  names.removeAll(getMetaModel().getPropertiesNames());
  if (names.isEmpty()) return Collections.EMPTY_LIST;
  Collection result = new ArrayList();
  for (Iterator it = names.iterator(); it.hasNext(); ) {
    String name = (String) it.next();
    if (name.indexOf('_') < 0) {
      result.add(getPropertyMapping(name));
    }
  }
  return result;
}

private Collection getReferenceMappings() {
  return referenceMappings == null ? Collections.EMPTY_LIST : referenceMappings.values();
}

/** All CMP fields from property and reference mappings, deduplicated by column name. */
public Collection getCmpFields() throws XavaException {
  Collection r = new ArrayList();
  Collection mappedColumns = new HashSet();
  for (Iterator it = getPropertyMappings().iterator(); it.hasNext(); ) {
    PropertyMapping pMapping = (PropertyMapping) it.next();
    r.addAll(pMapping.getCmpFields());
    mappedColumns.add(pMapping.getColumn());
  }
  for (Iterator it = getReferenceMappings().iterator(); it.hasNext(); ) {
    ReferenceMapping rMapping = (ReferenceMapping) it.next();
    for (Iterator itFields = rMapping.getCmpFields().iterator(); itFields.hasNext(); ) {
      CmpField field = (CmpField) itFields.next();
      if (!mappedColumns.contains(field.getColumn())) {
        r.add(field);
        mappedColumns.add(field.getColumn());
      }
    }
  }
  return r;
}

public boolean hasReferenceConverters() {
  return !getReferenceMappingsWithConverter().isEmpty();
}

// Lazily computed cache of reference mappings that have at least one converter-backed detail.
// NOTE(review): a reference with several converter-backed details is added once per detail,
// so the list may contain duplicates — verify callers only test emptiness/iterate.
public Collection getReferenceMappingsWithConverter() {
  if (referenceMappingsWithConverter == null) {
    referenceMappingsWithConverter = new ArrayList();
    Iterator it = getReferenceMappings().iterator();
    while (it.hasNext()) {
      ReferenceMapping referenceMapping = (ReferenceMapping) it.next();
      Collection mrd = referenceMapping.getDetails();
      Iterator itd = mrd.iterator();
      while (itd.hasNext()) {
        ReferenceMappingDetail referenceMappingDetail = (ReferenceMappingDetail) itd.next();
        if (referenceMappingDetail.hasConverter()) {
          referenceMappingsWithConverter.add(referenceMapping);
        }
      }
    }
  }
  return referenceMappingsWithConverter;
}

/**
 * Finds the column names in the formula and replaces them by qualified column names:
 * 'name' -> 't_reference.name'.
 */
private String qualifyFormulaWithReferenceName(String formula, String referenceName, String modelProperty) {
  EntityMapping em = MetaComponent.get(referenceName).getEntityMapping();
  Iterator<String> it = em.getColumns().iterator();
  while (it.hasNext()) {
    String column = it.next();
    if (formula.contains(column)) {
      formula = formula.replace(column, getQualifyColumnName(modelProperty, referenceName + "." + column));
    }
  }
  return formula;
}

// Qualifies a table column for SQL: nested properties use the "T_<reference>" table alias
// (so multiple references to the same model stay distinguishable); otherwise the own table.
private String getQualifyColumnName(String modelProperty, String tableColumn) {
  if (modelProperty.indexOf('.') >= 0) {
    if (tableColumn.indexOf('.') < 0) return tableColumn;
    String reference = modelProperty.substring(0, modelProperty.lastIndexOf('.'));
    if (tableColumn.startsWith(getTableToQualifyColumn() + ".")) {
      String member = modelProperty.substring(modelProperty.lastIndexOf('.') + 1);
      // Key members of the referenced model keep the already-qualified column.
      if (getMetaModel().getMetaReference(reference).getMetaModelReferenced().isKey(member))
        return tableColumn;
    }
    // Use the alias of the table instead of its name, in order to support
    // multiple references to the same model.
    if (reference.indexOf('.') >= 0) {
      if (getMetaModel().getMetaProperty(modelProperty).isKey()) {
        reference = reference.substring(0, reference.lastIndexOf('.'));
      }
      reference = reference.substring(reference.lastIndexOf('.') + 1);
    }
    return "T_" + reference + tableColumn.substring(tableColumn.lastIndexOf('.'));
  } else {
    return getTableToQualifyColumn() + "." + tableColumn;
  }
}
}
public class SearchResultBean implements Serializable { /** */ private static final long serialVersionUID = -837802320118584736L; protected Search.Result searchResult; protected static Log logger = LogFactory.getLog(SearchResultBean.class.getName()); protected Locale locale; protected boolean display = false; protected boolean selected = false; public SearchResultBean() { display = false; } public SearchResultBean(Search.Result searchResult, Locale locale) { this.searchResult = searchResult; this.locale = locale; this.display = true; this.selected = true; } public boolean getDisplay() { return display; } public boolean getSelected() { return selected; } public void setSelected(boolean selected) { logger.debug("SearchResultBean.setChecked():" + getUniqueID() + "," + selected); this.selected = selected; } public List<DisplayField> getFieldValues() { ArrayList<DisplayField> list = new ArrayList<DisplayField>(); EmailFields emailFields = Config.getConfig().getEmailFields(); for (EmailField field : emailFields.getAvailableFields().values()) { if (field.getShowInResults() != EmailField.ShowInResults.NORESULTS) { try { EmailFieldValue efv = searchResult.getFieldValue(field.getName()); list.add(DisplayField.getDisplayField(efv, locale, false)); } catch (MessageSearchException mse) { logger.debug("failed to retrieve field value from message: " + mse.getMessage()); } } } return list; } public String getUniqueID() { try { return searchResult.getEmailId().getUniqueID(); } catch (MessageSearchException mse) { logger.debug("failed to retrieve unique message id: " + mse.getMessage(), mse); return null; } } public boolean getMessageExist() { try { EmailID emailID = searchResult.getEmailId(); Volume volume = emailID.getVolume(); return (volume != null); /*if (volume!=null) { Archiver archiver = Config.getConfig().getArchiver(); boolean exists = archiver.isMessageExist(emailID); if (!exists) { logger.debug("message is not accessible on disk"); } return exists; } else { 
logger.debug("could not lookup volume. the index appears out of sync with volumeinfo ID field."); }*/ } catch (Exception e) { logger.debug("failed to determine if message exists in store:" + e.getMessage(), e); } return false; } public String getVolumeID() { try { EmailID emailID = searchResult.getEmailId(); Volume volume = emailID.getVolume(); if (volume != null) { String volumeID = volume.getID(); return volumeID; } else return null; } catch (MessageSearchException mse) { logger.debug("failed to retrieve volumeid: " + mse.getMessage(), mse); return null; } // return searchResult.getEmailId().getVolume().getID(); } public static synchronized List<SearchResultBean> getSearchResultBeans( List<Search.Result> results, Locale locale) { List<SearchResultBean> searchResultBeans = new LinkedList<SearchResultBean>(); try { for (Search.Result result : results) { searchResultBeans.add(new SearchResultBean(result, locale)); } while (searchResultBeans.size() < Config.getConfig().getSearch().getMaxSearchResults()) { searchResultBeans.add(new SearchResultBean()); } } catch (java.util.ConcurrentModificationException ce) { // bit of a hack to say the least try { Thread.sleep(50); } catch (Exception e) { } return getSearchResultBeans(results, locale); } return searchResultBeans; } }
/**
 * Client for accessing AWS CloudFormation. All service calls made using this client are
 * blocking, and will not return until the service call completes.
 *
 * <p>AWS CloudFormation lets you declare your AWS resources and their dependencies in a
 * template file; the resources are created, updated and deleted together as a single unit
 * called a stack. See the <a href="http://aws.amazon.com/cloudformation/">CloudFormation
 * Product Page</a> and <a href="http://aws.amazon.com/documentation/">AWS documentation</a>
 * for details.
 */
public class AmazonCloudFormationClient extends AmazonWebServiceClient
    implements AmazonCloudFormation {

  /** Provider for AWS credentials. */
  private AWSCredentialsProvider awsCredentialsProvider;

  // NOTE(review): logger is keyed to the interface, not this class — presumably intentional
  // SDK convention; verify before changing.
  private static final Log log = LogFactory.getLog(AmazonCloudFormation.class);

  /** Default signing name for the service. */
  private static final String DEFAULT_SIGNING_NAME = "cloudformation";

  /** The region metadata service name for computing region endpoints. */
  private static final String DEFAULT_ENDPOINT_PREFIX = "cloudformation";

  /** List of exception unmarshallers for all AWS CloudFormation exceptions. */
  protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers =
      new ArrayList<Unmarshaller<AmazonServiceException, Node>>();

  /**
   * Constructs a new client using the default credentials provider chain (environment
   * variables, Java system properties, then EC2 instance profile) and the default client
   * configuration.
   *
   * @see DefaultAWSCredentialsProviderChain
   */
  public AmazonCloudFormationClient() {
    this(
        new DefaultAWSCredentialsProviderChain(),
        com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Constructs a new client using the default credentials provider chain and the given
   * client configuration (proxy settings, retry counts, etc.).
   *
   * @param clientConfiguration The client configuration options for this client.
   * @see DefaultAWSCredentialsProviderChain
   */
  public AmazonCloudFormationClient(ClientConfiguration clientConfiguration) {
    this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
  }

  /**
   * Constructs a new client using the specified AWS account credentials and the default
   * client configuration.
   *
   * @param awsCredentials The AWS credentials (access key ID and secret key) to use.
   */
  public AmazonCloudFormationClient(AWSCredentials awsCredentials) {
    this(awsCredentials, com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Constructs a new client using the specified AWS account credentials and client
   * configuration options.
   *
   * @param awsCredentials The AWS credentials (access key ID and secret key) to use.
   * @param clientConfiguration The client configuration options for this client.
   */
  public AmazonCloudFormationClient(
      AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
    super(clientConfiguration);
    this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
    init();
  }

  /**
   * Constructs a new client using the specified AWS credentials provider and the default
   * client configuration.
   *
   * @param awsCredentialsProvider Provides credentials used to authenticate requests.
   */
  public AmazonCloudFormationClient(AWSCredentialsProvider awsCredentialsProvider) {
    this(awsCredentialsProvider, com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Constructs a new client using the specified AWS credentials provider and client
   * configuration options.
   *
   * @param awsCredentialsProvider Provides credentials used to authenticate requests.
   * @param clientConfiguration The client configuration options for this client.
   */
  public AmazonCloudFormationClient(
      AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
    this(awsCredentialsProvider, clientConfiguration, null);
  }

  /**
   * Constructs a new client using the specified AWS credentials provider, client
   * configuration options, and request metric collector.
   *
   * @param awsCredentialsProvider Provides credentials used to authenticate requests.
   * @param clientConfiguration The client configuration options for this client.
   * @param requestMetricCollector optional request metric collector
   */
  public AmazonCloudFormationClient(
      AWSCredentialsProvider awsCredentialsProvider,
      ClientConfiguration clientConfiguration,
      RequestMetricCollector requestMetricCollector) {
    super(clientConfiguration, requestMetricCollector);
    this.awsCredentialsProvider = awsCredentialsProvider;
    init();
  }

  // Shared constructor tail: registers exception unmarshallers, sets the signing/endpoint
  // names and the default endpoint, and installs the request handler chains.
  private void init() {
    exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
    exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
    exceptionUnmarshallers.add(new InsufficientCapabilitiesExceptionUnmarshaller());
    exceptionUnmarshallers.add(new StandardErrorUnmarshaller());
    setServiceNameIntern(DEFAULT_SIGNING_NAME);
    setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX);
    // calling this.setEndPoint(...) will also modify the signer accordingly
    this.setEndpoint("https://cloudformation.us-east-1.amazonaws.com");
    HandlerChainFactory chainFactory = new HandlerChainFactory();
    requestHandler2s.addAll(
        chainFactory.newRequestHandlerChain(
            "/com/amazonaws/services/cloudformation/request.handlers"));
    requestHandler2s.addAll(
        chainFactory.newRequestHandler2Chain(
            "/com/amazonaws/services/cloudformation/request.handler2s"));
  }

  /**
   * Cancels an update on the specified stack; the stack rolls back to the previous
   * configuration. <note>Only stacks in the UPDATE_IN_PROGRESS state can be cancelled.</note>
   *
   * @param cancelUpdateStackRequest The input for the <a>CancelUpdateStack</a> action.
   * @sample AmazonCloudFormation.CancelUpdateStack
   */
  @Override
  public void cancelUpdateStack(CancelUpdateStackRequest cancelUpdateStackRequest) {
    ExecutionContext executionContext = createExecutionContext(cancelUpdateStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CancelUpdateStackRequest> request = null;
    Response<Void> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new CancelUpdateStackRequestMarshaller()
                .marshall(super.beforeMarshalling(cancelUpdateStackRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null);
      invoke(request, responseHandler, executionContext);
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Creates a stack as specified in the template; creation starts once the call returns
   * successfully. Check progress via the <a>DescribeStacks</a> API.
   *
   * @param createStackRequest The input for <a>CreateStack</a> action.
   * @return Result of the CreateStack operation returned by the service.
   * @throws LimitExceededException Quota for the resource has already been reached.
   * @throws AlreadyExistsException Resource with the name requested already exists.
   * @throws InsufficientCapabilitiesException The template contains resources with
   *     capabilities that were not specified in the Capabilities parameter.
   * @sample AmazonCloudFormation.CreateStack
   */
  @Override
  public CreateStackResult createStack(CreateStackRequest createStackRequest) {
    ExecutionContext executionContext = createExecutionContext(createStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<CreateStackRequest> request = null;
    Response<CreateStackResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new CreateStackRequestMarshaller()
                .marshall(super.beforeMarshalling(createStackRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<CreateStackResult> responseHandler =
          new StaxResponseHandler<CreateStackResult>(new CreateStackResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Deletes a specified stack; once completed, deleted stacks no longer show up in
   * <a>DescribeStacks</a>.
   *
   * @param deleteStackRequest The input for <a>DeleteStack</a> action.
   * @sample AmazonCloudFormation.DeleteStack
   */
  @Override
  public void deleteStack(DeleteStackRequest deleteStackRequest) {
    ExecutionContext executionContext = createExecutionContext(deleteStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DeleteStackRequest> request = null;
    Response<Void> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DeleteStackRequestMarshaller()
                .marshall(super.beforeMarshalling(deleteStackRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null);
      invoke(request, responseHandler, executionContext);
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Retrieves your account's AWS CloudFormation limits, such as the maximum number of
   * stacks that you can create in your account.
   *
   * @param describeAccountLimitsRequest The input for the <a>DescribeAccountLimits</a> action.
   * @return Result of the DescribeAccountLimits operation returned by the service.
   * @sample AmazonCloudFormation.DescribeAccountLimits
   */
  @Override
  public DescribeAccountLimitsResult describeAccountLimits(
      DescribeAccountLimitsRequest describeAccountLimitsRequest) {
    ExecutionContext executionContext = createExecutionContext(describeAccountLimitsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeAccountLimitsRequest> request = null;
    Response<DescribeAccountLimitsResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DescribeAccountLimitsRequestMarshaller()
                .marshall(super.beforeMarshalling(describeAccountLimitsRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<DescribeAccountLimitsResult> responseHandler =
          new StaxResponseHandler<DescribeAccountLimitsResult>(
              new DescribeAccountLimitsResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Returns all stack-related events for a specified stack. <note>Events for failed or
   * deleted stacks can be listed by specifying the unique stack identifier (stack ID).</note>
   *
   * @param describeStackEventsRequest The input for <a>DescribeStackEvents</a> action.
   * @return Result of the DescribeStackEvents operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackEvents
   */
  @Override
  public DescribeStackEventsResult describeStackEvents(
      DescribeStackEventsRequest describeStackEventsRequest) {
    ExecutionContext executionContext = createExecutionContext(describeStackEventsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackEventsRequest> request = null;
    Response<DescribeStackEventsResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DescribeStackEventsRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackEventsRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<DescribeStackEventsResult> responseHandler =
          new StaxResponseHandler<DescribeStackEventsResult>(
              new DescribeStackEventsResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Returns a description of the specified resource in the specified stack. For deleted
   * stacks, resource information is available for up to 90 days after deletion.
   *
   * @param describeStackResourceRequest The input for <a>DescribeStackResource</a> action.
   * @return Result of the DescribeStackResource operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackResource
   */
  @Override
  public DescribeStackResourceResult describeStackResource(
      DescribeStackResourceRequest describeStackResourceRequest) {
    ExecutionContext executionContext = createExecutionContext(describeStackResourceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackResourceRequest> request = null;
    Response<DescribeStackResourceResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DescribeStackResourceRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackResourceRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<DescribeStackResourceResult> responseHandler =
          new StaxResponseHandler<DescribeStackResourceResult>(
              new DescribeStackResourceResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Returns AWS resource descriptions for running and deleted stacks, filtered by
   * <code>StackName</code> or <code>PhysicalResourceId</code> (exactly one of the two must
   * be given; a <code>ValidationError</code> is returned if both are specified), optionally
   * narrowed by <code>LogicalResourceId</code>. <note>Only the first 100 resources are
   * returned; use <code>ListStackResources</code> for larger stacks.</note> For deleted
   * stacks, resource information is available for up to 90 days after deletion.
   *
   * @param describeStackResourcesRequest The input for <a>DescribeStackResources</a> action.
   * @return Result of the DescribeStackResources operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackResources
   */
  @Override
  public DescribeStackResourcesResult describeStackResources(
      DescribeStackResourcesRequest describeStackResourcesRequest) {
    ExecutionContext executionContext = createExecutionContext(describeStackResourcesRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackResourcesRequest> request = null;
    Response<DescribeStackResourcesResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DescribeStackResourcesRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackResourcesRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<DescribeStackResourcesResult> responseHandler =
          new StaxResponseHandler<DescribeStackResourcesResult>(
              new DescribeStackResourcesResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Returns the description for the specified stack; if no stack name was specified, then
   * it returns the description for all the stacks created.
   *
   * @param describeStacksRequest The input for <a>DescribeStacks</a> action.
   * @return Result of the DescribeStacks operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStacks
   */
  @Override
  public DescribeStacksResult describeStacks(DescribeStacksRequest describeStacksRequest) {
    ExecutionContext executionContext = createExecutionContext(describeStacksRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStacksRequest> request = null;
    Response<DescribeStacksResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new DescribeStacksRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStacksRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<DescribeStacksResult> responseHandler =
          new StaxResponseHandler<DescribeStacksResult>(new DescribeStacksResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /** Convenience overload: describes all stacks using a default request. */
  @Override
  public DescribeStacksResult describeStacks() {
    return describeStacks(new DescribeStacksRequest());
  }

  /**
   * Returns the estimated monthly cost of a template as an AWS Simple Monthly Calculator
   * URL describing the resources required to run the template.
   *
   * @param estimateTemplateCostRequest
   * @return Result of the EstimateTemplateCost operation returned by the service.
   * @sample AmazonCloudFormation.EstimateTemplateCost
   */
  @Override
  public EstimateTemplateCostResult estimateTemplateCost(
      EstimateTemplateCostRequest estimateTemplateCostRequest) {
    ExecutionContext executionContext = createExecutionContext(estimateTemplateCostRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<EstimateTemplateCostRequest> request = null;
    Response<EstimateTemplateCostResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new EstimateTemplateCostRequestMarshaller()
                .marshall(super.beforeMarshalling(estimateTemplateCostRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }
      StaxResponseHandler<EstimateTemplateCostResult> responseHandler =
          new StaxResponseHandler<EstimateTemplateCostResult>(
              new EstimateTemplateCostResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);
      return response.getAwsResponse();
    } finally {
      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /** Convenience overload: estimates cost using a default request. */
  @Override
  public EstimateTemplateCostResult estimateTemplateCost() {
    return estimateTemplateCost(new EstimateTemplateCostRequest());
  }

  /**
   * Returns the stack policy for a specified stack, or a null value if the stack has no
   * policy.
   *
   * @param getStackPolicyRequest The input for the <a>GetStackPolicy</a> action.
   * @return Result of the GetStackPolicy operation returned by the service.
   * @sample AmazonCloudFormation.GetStackPolicy
   */
  @Override
  public GetStackPolicyResult getStackPolicy(GetStackPolicyRequest getStackPolicyRequest) {
    ExecutionContext executionContext = createExecutionContext(getStackPolicyRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<GetStackPolicyRequest> request = null;
    Response<GetStackPolicyResult> response = null;
    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new GetStackPolicyRequestMarshaller()
                .marshall(super.beforeMarshalling(getStackPolicyRequest));
        // Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetStackPolicyResult> responseHandler = new StaxResponseHandler<GetStackPolicyResult>(new GetStackPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns the template body for a specified stack. You can get the template for running or * deleted stacks. * * <p>For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has * been deleted. <note> If the template does not exist, a <code>ValidationError</code> is * returned. </note> * * @param getTemplateRequest The input for a <a>GetTemplate</a> action. * @return Result of the GetTemplate operation returned by the service. * @sample AmazonCloudFormation.GetTemplate */ @Override public GetTemplateResult getTemplate(GetTemplateRequest getTemplateRequest) { ExecutionContext executionContext = createExecutionContext(getTemplateRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTemplateRequest> request = null; Response<GetTemplateResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTemplateRequestMarshaller() .marshall(super.beforeMarshalling(getTemplateRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTemplateResult> responseHandler = new StaxResponseHandler<GetTemplateResult>(new GetTemplateResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns information about a new or existing template. The <code>GetTemplateSummary</code> * action is useful for viewing parameter information, such as default parameter values and * parameter types, before you create or update a stack. * * <p>You can use the <code>GetTemplateSummary</code> action when you submit a template, or you * can get template information for a running or deleted stack. * * <p>For deleted stacks, <code>GetTemplateSummary</code> returns the template information for up * to 90 days after the stack has been deleted. If the template does not exist, a <code> * ValidationError</code> is returned. * * @param getTemplateSummaryRequest The input for the <a>GetTemplateSummary</a> action. * @return Result of the GetTemplateSummary operation returned by the service. * @sample AmazonCloudFormation.GetTemplateSummary */ @Override public GetTemplateSummaryResult getTemplateSummary( GetTemplateSummaryRequest getTemplateSummaryRequest) { ExecutionContext executionContext = createExecutionContext(getTemplateSummaryRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTemplateSummaryRequest> request = null; Response<GetTemplateSummaryResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTemplateSummaryRequestMarshaller() .marshall(super.beforeMarshalling(getTemplateSummaryRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTemplateSummaryResult> responseHandler = new StaxResponseHandler<GetTemplateSummaryResult>( new GetTemplateSummaryResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetTemplateSummaryResult getTemplateSummary() { return getTemplateSummary(new GetTemplateSummaryRequest()); } /** * Returns descriptions of all resources of the specified stack. * * <p>For deleted stacks, ListStackResources returns resource information for up to 90 days after * the stack has been deleted. * * @param listStackResourcesRequest The input for the <a>ListStackResource</a> action. * @return Result of the ListStackResources operation returned by the service. * @sample AmazonCloudFormation.ListStackResources */ @Override public ListStackResourcesResult listStackResources( ListStackResourcesRequest listStackResourcesRequest) { ExecutionContext executionContext = createExecutionContext(listStackResourcesRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListStackResourcesRequest> request = null; Response<ListStackResourcesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListStackResourcesRequestMarshaller() .marshall(super.beforeMarshalling(listStackResourcesRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListStackResourcesResult> responseHandler = new StaxResponseHandler<ListStackResourcesResult>( new ListStackResourcesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns the summary information for stacks whose status matches the specified * StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days * after the stack is deleted. If no StackStatusFilter is specified, summary information for all * stacks is returned (including existing stacks and stacks that have been deleted). * * @param listStacksRequest The input for <a>ListStacks</a> action. * @return Result of the ListStacks operation returned by the service. * @sample AmazonCloudFormation.ListStacks */ @Override public ListStacksResult listStacks(ListStacksRequest listStacksRequest) { ExecutionContext executionContext = createExecutionContext(listStacksRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListStacksRequest> request = null; Response<ListStacksResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListStacksRequestMarshaller().marshall(super.beforeMarshalling(listStacksRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListStacksResult> responseHandler = new StaxResponseHandler<ListStacksResult>(new ListStacksResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListStacksResult listStacks() { return listStacks(new ListStacksRequest()); } /** * Sets a stack policy for a specified stack. * * @param setStackPolicyRequest The input for the <a>SetStackPolicy</a> action. * @sample AmazonCloudFormation.SetStackPolicy */ @Override public void setStackPolicy(SetStackPolicyRequest setStackPolicyRequest) { ExecutionContext executionContext = createExecutionContext(setStackPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<SetStackPolicyRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new SetStackPolicyRequestMarshaller() .marshall(super.beforeMarshalling(setStackPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Sends a signal to the specified resource with a success or failure status. You can use the * SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation * doesn't proceed with a stack creation or update until resources receive the required number of * signals or the timeout period is exceeded. 
The SignalResource API is useful in cases where you * want to send signals from anywhere other than an Amazon EC2 instance. * * @param signalResourceRequest The input for the <a>SignalResource</a> action. * @sample AmazonCloudFormation.SignalResource */ @Override public void signalResource(SignalResourceRequest signalResourceRequest) { ExecutionContext executionContext = createExecutionContext(signalResourceRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<SignalResourceRequest> request = null; Response<Void> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new SignalResourceRequestMarshaller() .marshall(super.beforeMarshalling(signalResourceRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null); invoke(request, responseHandler, executionContext); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Updates a stack as specified in the template. After the call completes successfully, the stack * update starts. You can check the status of the stack via the <a>DescribeStacks</a> action. * * <p>To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a> * action. * * <p>Tags that were associated with this stack during creation time will still be associated with * the stack after an <code>UpdateStack</code> operation. * * <p>For more information about creating an update template, updating a stack, and monitoring the * progress of the update, see <a href= * "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html" * >Updating a Stack</a>. * * @param updateStackRequest The input for <a>UpdateStack</a> action. 
* @return Result of the UpdateStack operation returned by the service. * @throws InsufficientCapabilitiesException The template contains resources with capabilities * that were not specified in the Capabilities parameter. * @sample AmazonCloudFormation.UpdateStack */ @Override public UpdateStackResult updateStack(UpdateStackRequest updateStackRequest) { ExecutionContext executionContext = createExecutionContext(updateStackRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateStackRequest> request = null; Response<UpdateStackResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateStackRequestMarshaller() .marshall(super.beforeMarshalling(updateStackRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateStackResult> responseHandler = new StaxResponseHandler<UpdateStackResult>(new UpdateStackResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Validates a specified template. * * @param validateTemplateRequest The input for <a>ValidateTemplate</a> action. * @return Result of the ValidateTemplate operation returned by the service. 
* @sample AmazonCloudFormation.ValidateTemplate */ @Override public ValidateTemplateResult validateTemplate(ValidateTemplateRequest validateTemplateRequest) { ExecutionContext executionContext = createExecutionContext(validateTemplateRequest); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ValidateTemplateRequest> request = null; Response<ValidateTemplateResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ValidateTemplateRequestMarshaller() .marshall(super.beforeMarshalling(validateTemplateRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ValidateTemplateResult> responseHandler = new StaxResponseHandler<ValidateTemplateResult>( new ValidateTemplateResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns additional metadata for a previously executed successful, request, typically used for * debugging issues where a service isn't acting as expected. This data isn't considered part of * the result data returned by an operation, so it's available through this separate, diagnostic * interface. * * <p>Response metadata is only cached for a limited period of time, so if you need to access this * extra diagnostic information for an executed request, you should use this method to retrieve it * as soon as possible after executing the request. * * @param request The originally executed request * @return The response metadata for the specified request, or null if none is available. 
*/ public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) { return client.getResponseMetadataForRequest(request); } private <X, Y extends AmazonWebServiceRequest> Response<X> invoke( Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) { request.setEndpoint(endpoint); request.setTimeOffset(timeOffset); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); AWSCredentials credentials; awsRequestMetrics.startEvent(Field.CredentialsRequestTime); try { credentials = awsCredentialsProvider.getCredentials(); } finally { awsRequestMetrics.endEvent(Field.CredentialsRequestTime); } AmazonWebServiceRequest originalRequest = request.getOriginalRequest(); if (originalRequest != null && originalRequest.getRequestCredentials() != null) { credentials = originalRequest.getRequestCredentials(); } executionContext.setCredentials(credentials); DefaultErrorResponseHandler errorResponseHandler = new DefaultErrorResponseHandler(exceptionUnmarshallers); return client.execute(request, responseHandler, errorResponseHandler, executionContext); } }
/**
 * The <code>SecurityProtocols</code> class holds all authentication and privacy protocols for a
 * SNMP entity.
 *
 * <p>To register security protocols other than the default, set the system property {@link
 * #SECURITY_PROTOCOLS_PROPERTIES} to a customized version of the <code>SecurityProtocols.properties
 * </code> file. The path has to be specified relatively to this class.
 *
 * @author Jochen Katz & Frank Fock
 * @version 1.9
 */
public class SecurityProtocols implements Serializable {

  private static final long serialVersionUID = 3800474900139635836L;

  // Registered protocols, keyed by their unique protocol OID.
  private Hashtable<OID, AuthenticationProtocol> authProtocols;
  private Hashtable<OID, PrivacyProtocol> privProtocols;

  public static final String SECURITY_PROTOCOLS_PROPERTIES = "org.snmp4j.securityProtocols";
  private static final String SECURITY_PROTOCOLS_PROPERTIES_DEFAULT = "SecurityProtocols.properties";
  private static final LogAdapter logger = LogFactory.getLogger(SecurityProtocols.class);

  // Lazily created singleton; replaceable via setSecurityProtocols(..).
  private static SecurityProtocols instance = null;
  // Running maxima updated as protocols are added (never shrink on removal).
  private int maxAuthDigestLength = 0;
  private int maxPrivDecryptParamsLength = 0;

  protected SecurityProtocols() {
    authProtocols = new Hashtable<OID, AuthenticationProtocol>(5);
    privProtocols = new Hashtable<OID, PrivacyProtocol>(5);
  }

  /**
   * Get an instance of class SecurityProtocols.
   *
   * @return the globally used SecurityProtocols object.
   */
  public static SecurityProtocols getInstance() {
    if (instance == null) {
      instance = new SecurityProtocols();
    }
    return instance;
  }

  /**
   * Set the <code>SecurityProtocols</code>
   *
   * @param securityProtocols SecurityProtocols
   */
  public static void setSecurityProtocols(SecurityProtocols securityProtocols) {
    SecurityProtocols.instance = securityProtocols;
  }

  /**
   * Add the default SecurityProtocols.
   *
   * <p>The names of the SecurityProtocols to add are read from a properties file.
   *
   * @throws InternalError if the properties file cannot be opened/read.
   */
  public synchronized void addDefaultProtocols() {
    if (SNMP4JSettings.isExtensibilityEnabled()) {
      // Extensible mode: protocol class names come from a properties file on the classpath.
      String secProtocols = System.getProperty(SECURITY_PROTOCOLS_PROPERTIES, SECURITY_PROTOCOLS_PROPERTIES_DEFAULT);
      InputStream is = SecurityProtocols.class.getResourceAsStream(secProtocols);
      if (is == null) {
        throw new InternalError("Could not read '" + secProtocols + "' from classpath!");
      }
      Properties props = new Properties();
      try {
        props.load(is);
        for (Enumeration en = props.propertyNames(); en.hasMoreElements(); ) {
          String className = en.nextElement().toString();
          try {
            // Instantiate each listed class reflectively and register it by interface.
            Class c = Class.forName(className);
            Object proto = c.newInstance();
            if (proto instanceof AuthenticationProtocol) {
              addAuthenticationProtocol((AuthenticationProtocol) proto);
            } else if (proto instanceof PrivacyProtocol) {
              addPrivacyProtocol((PrivacyProtocol) proto);
            } else {
              logger.error(
                  "Failed to register security protocol because it does "
                      + "not implement required interfaces: "
                      + className);
            }
          } catch (Exception cnfe) {
            logger.error(cnfe);
            throw new InternalError(cnfe.toString());
          }
        }
      } catch (IOException iox) {
        String txt = "Could not read '" + secProtocols + "': " + iox.getMessage();
        logger.error(txt);
        throw new InternalError(txt);
      } finally {
        try {
          is.close();
        } catch (IOException ex) {
          // ignore close failures (stream already consumed); log at warn level only
          logger.warn(ex);
        }
      }
    } else {
      // Non-extensible mode: register the built-in protocol set directly.
      addAuthenticationProtocol(new AuthMD5());
      addAuthenticationProtocol(new AuthSHA());
      addPrivacyProtocol(new PrivDES());
      addPrivacyProtocol(new PrivAES128());
      addPrivacyProtocol(new PrivAES192());
      addPrivacyProtocol(new PrivAES256());
    }
  }

  /**
   * Add the given {@link AuthenticationProtocol}. If an authentication protocol with the supplied
   * ID already exists, the supplied authentication protocol will not be added and the security
   * protocols will not be changed.
   *
   * @param auth the AuthenticationProtocol to add (an existing authentication protocol with <code>
   *     auth</code>'s ID remains unchanged).
   */
  public synchronized void addAuthenticationProtocol(AuthenticationProtocol auth) {
    if (authProtocols.get(auth.getID()) == null) {
      authProtocols.put(auth.getID(), auth);
      if (auth.getDigestLength() > maxAuthDigestLength) {
        maxAuthDigestLength = auth.getDigestLength();
      }
    }
  }

  /**
   * Get the {@link AuthenticationProtocol} with the given ID.
   *
   * @param id The unique ID (specified as {@link OID}) of the AuthenticationProtocol.
   * @return the AuthenticationProtocol object if it was added before, or null if not.
   */
  public AuthenticationProtocol getAuthenticationProtocol(OID id) {
    if (id == null) {
      return null;
    }
    return authProtocols.get(id);
  }

  /**
   * Remove the given {@link AuthenticationProtocol}.
   *
   * @param auth The protocol to remove
   */
  public void removeAuthenticationProtocol(AuthenticationProtocol auth) {
    authProtocols.remove(auth.getID());
  }

  /**
   * Add the given {@link PrivacyProtocol}. If a privacy protocol with the supplied ID already
   * exists, the supplied privacy protocol will not be added and the security protocols will not be
   * changed.
   *
   * @param priv the PrivacyProtocol to add (an existing privacy protocol with <code>priv</code>'s ID
   *     remains unchanged).
   */
  public synchronized void addPrivacyProtocol(PrivacyProtocol priv) {
    if (privProtocols.get(priv.getID()) == null) {
      privProtocols.put(priv.getID(), priv);
      if (priv.getDecryptParamsLength() > maxPrivDecryptParamsLength) {
        maxPrivDecryptParamsLength = priv.getDecryptParamsLength();
      }
    }
  }

  /**
   * Get the PrivacyProtocol with the given ID.
   *
   * @param id The unique ID (specified as {@link OID}) of the PrivacyProtocol.
   * @return the {@link PrivacyProtocol} object if it was added before, or null if not.
   */
  public PrivacyProtocol getPrivacyProtocol(OID id) {
    if (id == null) {
      return null;
    }
    return privProtocols.get(id);
  }

  /**
   * Remove the given {@link PrivacyProtocol}.
   *
   * @param priv The protocol to remove
   */
  public void removePrivacyProtocol(PrivacyProtocol priv) {
    privProtocols.remove(priv.getID());
  }

  /**
   * Generates the localized key for the given password and engine id for the authentication
   * protocol specified by the supplied OID.
   *
   * @param authProtocolID an <code>OID</code> identifying the authentication protocol to use.
   * @param passwordString the authentication pass phrase.
   * @param engineID the engine ID of the authoritative engine.
   * @return the localized authentication key, or null if the protocol is not registered.
   */
  public byte[] passwordToKey(OID authProtocolID, OctetString passwordString, byte[] engineID) {
    AuthenticationProtocol protocol = authProtocols.get(authProtocolID);
    if (protocol == null) {
      return null;
    }
    return protocol.passwordToKey(passwordString, engineID);
  }

  /**
   * Generates the localized key for the given password and engine id for the privacy protocol
   * specified by the supplied OID.
   *
   * @param privProtocolID an <code>OID</code> identifying the privacy protocol the key should be
   *     created for.
   * @param authProtocolID an <code>OID</code> identifying the authentication protocol to use.
   * @param passwordString the authentication pass phrase.
   * @param engineID the engine ID of the authoritative engine.
   * @return the localized privacy key, or null if either protocol is unregistered or the
   *     authentication protocol produced no key.
   */
  public byte[] passwordToKey(
      OID privProtocolID, OID authProtocolID, OctetString passwordString, byte[] engineID) {
    AuthenticationProtocol authProtocol = authProtocols.get(authProtocolID);
    if (authProtocol == null) {
      return null;
    }
    PrivacyProtocol privProtocol = privProtocols.get(privProtocolID);
    if (privProtocol == null) {
      return null;
    }
    byte[] key = authProtocol.passwordToKey(passwordString, engineID);
    if (key == null) {
      return null;
    }
    if (key.length >= privProtocol.getMinKeyLength()) {
      if (key.length > privProtocol.getMaxKeyLength()) {
        // truncate key to the privacy protocol's maximum key length
        byte[] truncatedKey = new byte[privProtocol.getMaxKeyLength()];
        System.arraycopy(key, 0, truncatedKey, 0, privProtocol.getMaxKeyLength());
        return truncatedKey;
      }
      return key;
    }
    // extend key if necessary (key shorter than the privacy protocol's minimum)
    byte[] extKey = privProtocol.extendShortKey(key, passwordString, engineID, authProtocol);
    return extKey;
  }

  /**
   * Gets the maximum authentication key length of the all known authentication protocols.
   *
   * @return the maximum authentication key length of all authentication protocols that have been
   *     added to this <code>SecurityProtocols</code> instance.
   */
  public int getMaxAuthDigestLength() {
    return maxAuthDigestLength;
  }

  /**
   * Gets the maximum privacy key length of the currently known privacy protocols.
   *
   * @return the maximum privacy key length of all privacy protocols that have been added to this
   *     <code>SecurityProtocols</code> instance.
   */
  public int getMaxPrivDecryptParamsLength() {
    return maxPrivDecryptParamsLength;
  }

  /**
   * Limits the supplied key value to the specified maximum length
   *
   * @param key the key to truncate.
   * @param maxKeyLength the maximum length of the returned key.
   * @return the truncated key with a length of <code>min(key.length, maxKeyLength)</code>.
   * @since 1.9
   */
  public byte[] truncateKey(byte[] key, int maxKeyLength) {
    byte[] truncatedNewKey = new byte[Math.min(maxKeyLength, key.length)];
    System.arraycopy(key, 0, truncatedNewKey, 0, truncatedNewKey.length);
    return truncatedNewKey;
  }
}
/**
 * Access logs associated with boolean properties.
 *
 * <p>Do not call getLog multiple times on the same logger name. Since this is an internal API, no
 * checks are made to ensure that multiple logs do not exist for the same logger.
 *
 * @param loggerName name of the logger to create
 * @param oldLogName legacy log name associated with the logger
 * @param override when true, the log level is forced to VERBOSE; otherwise no level is forced
 * @return the log created by the configured factory
 */
public static Log getLog(String loggerName, String oldLogName, boolean override) {
  final Level forcedLevel;
  if (override) {
    forcedLevel = VERBOSE;
  } else {
    forcedLevel = null;
  }
  return logFactory.createLog(loggerName, oldLogName, forcedLevel);
}
/** Registers {@link MockLoggerFactory} as the global logger factory for this test. */
private void setMockLoggerFactory() {
  final Class<MockLoggerFactory> factoryClass = MockLoggerFactory.class;
  LogFactory.setFactory(factoryClass);
}
/** Verifies that once a factory is registered, LogFactory hands out loggers created by it. */
@Test
public void returnsLoggerFromRegisteredFactory() {
  setMockLoggerFactory();
  final Logger created = LogFactory.getLogger(LogFactoryTest.class);
  assertThat(created, is(instanceOf(MockLogger.class)));
}
/**
 * Tests for SequenceFileAsBinaryOutputFormat: round-trips raw binary key/value pairs,
 * checks key/value class defaulting, and verifies the compression-type rules in
 * checkOutputSpecs.
 */
public class TestMRSequenceFileAsBinaryOutputFormat extends TestCase {
  private static final Log LOG =
      LogFactory.getLog(TestMRSequenceFileAsBinaryOutputFormat.class.getName());
  // Number of key/value records written and then re-read in testBinary().
  private static final int RECORDS = 10000;

  // Writes RECORDS random int/double pairs as raw serialized bytes, then re-reads
  // them with SequenceFileInputFormat and compares against a re-seeded Random
  // replaying the identical sequence.
  public void testBinary() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    Job job = new Job(conf);

    Path outdir = new Path(System.getProperty("test.build.data", "/tmp"), "outseq");
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);

    FileOutputFormat.setOutputPath(job, outdir);

    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class);

    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);

    BytesWritable bkey = new BytesWritable();
    BytesWritable bval = new BytesWritable();

    TaskAttemptContext context =
        MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    OutputFormat<BytesWritable, BytesWritable> outputFormat =
        new SequenceFileAsBinaryOutputFormat();
    OutputCommitter committer = outputFormat.getOutputCommitter(context);
    committer.setupJob(job);
    RecordWriter<BytesWritable, BytesWritable> writer = outputFormat.getRecordWriter(context);

    IntWritable iwritable = new IntWritable();
    DoubleWritable dwritable = new DoubleWritable();
    DataOutputBuffer outbuf = new DataOutputBuffer();
    LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
    try {
      for (int i = 0; i < RECORDS; ++i) {
        // Serialize each Writable into raw bytes; the binary output format stores
        // the bytes verbatim, so the reader can later deserialize the real types.
        iwritable = new IntWritable(r.nextInt());
        iwritable.write(outbuf);
        bkey.set(outbuf.getData(), 0, outbuf.getLength());
        outbuf.reset();
        dwritable = new DoubleWritable(r.nextDouble());
        dwritable.write(outbuf);
        bval.set(outbuf.getData(), 0, outbuf.getLength());
        outbuf.reset();
        writer.write(bkey, bval);
      }
    } finally {
      writer.close(context);
    }
    committer.commitTask(context);
    committer.commitJob(job);

    InputFormat<IntWritable, DoubleWritable> iformat =
        new SequenceFileInputFormat<IntWritable, DoubleWritable>();
    int count = 0;
    // Re-seed so the verification loop regenerates the exact written sequence.
    r.setSeed(seed);
    SequenceFileInputFormat.setInputPaths(job, outdir);
    LOG.info("Reading data by SequenceFileInputFormat");
    for (InputSplit split : iformat.getSplits(job)) {
      RecordReader<IntWritable, DoubleWritable> reader = iformat.createRecordReader(split, context);
      MapContext<IntWritable, DoubleWritable, BytesWritable, BytesWritable> mcontext =
          new MapContextImpl<IntWritable, DoubleWritable, BytesWritable, BytesWritable>(
              job.getConfiguration(),
              context.getTaskAttemptID(),
              reader,
              null,
              null,
              MapReduceTestUtil.createDummyReporter(),
              split);
      reader.initialize(split, mcontext);
      try {
        int sourceInt;
        double sourceDouble;
        while (reader.nextKeyValue()) {
          sourceInt = r.nextInt();
          sourceDouble = r.nextDouble();
          iwritable = reader.getCurrentKey();
          dwritable = reader.getCurrentValue();
          assertEquals(
              "Keys don't match: " + "*" + iwritable.get() + ":" + sourceInt + "*",
              sourceInt,
              iwritable.get());
          assertTrue(
              "Vals don't match: " + "*" + dwritable.get() + ":" + sourceDouble + "*",
              Double.compare(dwritable.get(), sourceDouble) == 0);
          ++count;
        }
      } finally {
        reader.close();
      }
    }
    assertEquals("Some records not found", RECORDS, count);
  }

  // When no explicit sequence-file key/value classes are set, the format must
  // fall back to the job's output key/value classes; explicit setters override.
  public void testSequenceOutputClassDefaultsToMapRedOutputClass() throws IOException {
    Job job = new Job();
    // Setting Random class to test getSequenceFileOutput{Key,Value}Class
    job.setOutputKeyClass(FloatWritable.class);
    job.setOutputValueClass(BooleanWritable.class);

    assertEquals(
        "SequenceFileOutputKeyClass should default to ouputKeyClass",
        FloatWritable.class,
        SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job));
    assertEquals(
        "SequenceFileOutputValueClass should default to " + "ouputValueClass",
        BooleanWritable.class,
        SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job));

    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class);

    assertEquals(
        "SequenceFileOutputKeyClass not updated",
        IntWritable.class,
        SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job));
    assertEquals(
        "SequenceFileOutputValueClass not updated",
        DoubleWritable.class,
        SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job));
  }

  // checkOutputSpecs must accept BLOCK compression but reject RECORD compression
  // with InvalidJobConfException, since this format cannot do per-record compression.
  public void testcheckOutputSpecsForbidRecordCompression() throws IOException {
    Job job = Job.getInstance(new Configuration(), "testcheckOutputSpecsForbidRecordCompression");
    FileSystem fs = FileSystem.getLocal(job.getConfiguration());
    Path outputdir = new Path(System.getProperty("test.build.data", "/tmp") + "/output");
    fs.delete(outputdir, true);

    // Without outputpath, FileOutputFormat.checkoutputspecs will throw
    // InvalidJobConfException
    FileOutputFormat.setOutputPath(job, outputdir);

    // SequenceFileAsBinaryOutputFormat doesn't support record compression
    // It should throw an exception when checked by checkOutputSpecs
    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);

    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    try {
      new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
    } catch (Exception e) {
      fail(
          "Block compression should be allowed for "
              + "SequenceFileAsBinaryOutputFormat:Caught "
              + e.getClass().getName());
    }

    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.RECORD);
    try {
      new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
      fail("Record compression should not be allowed for " + "SequenceFileAsBinaryOutputFormat");
    } catch (InvalidJobConfException ie) {
      // expected
    } catch (Exception e) {
      fail(
          "Expected "
              + InvalidJobConfException.class.getName()
              + "but caught "
              + e.getClass().getName());
    }
  }
}
/** * Generic session containing id of logged user. * @author Zdenda * */ public class GenericSession extends AuthenticatedWebSession { private static final long serialVersionUID = 1L; protected final Log logger = LogFactory.getLog(getClass()); /** * Id of logged member */ private Long memberId; @SpringBean(name = "membernetManager") private MembernetManager membernetManager; public GenericSession(Request request) { super(request); //so the autowiring works org.apache.wicket.injection.Injector.get().inject(this); } /** * For now, the username will be the memberId. * Method will try to parse the memberId and check if it exists. * If the parsing is ok and member with this id exists, then true is returned. */ @Override public boolean authenticate(String username, String password) { try { //try to parse it memberId = Long.parseLong(username); if (membernetManager == null) { logger.error("MembernetManager is null."); return false; } //check if exists if(membernetManager.exists(memberId)) { logger.debug("Successfully logged as member id="+memberId); return true; } else { logger.debug("Failed to log as member id="+memberId+". Member doesn't exist."); return false; } } catch (NumberFormatException e) { logger.warn("Error when parsing memberId: "+username); return false; } } @Override public void invalidate() { super.invalidate(); memberId = null; } @Override public Roles getRoles() { return null; } public long getLoggedMemberId() { return memberId; } }
/**
 * A simple RPC mechanism.
 *
 * <p>A <i>protocol</i> is a Java interface. All parameters and return types must be one of:
 *
 * <ul>
 *   <li>a primitive type, <code>boolean</code>, <code>byte</code>, <code>char</code>, <code>short
 *       </code>, <code>int</code>, <code>long</code>, <code>float</code>, <code>double</code>, or
 *       <code>void</code>; or
 *   <li>a {@link String}; or
 *   <li>a {@link Writable}; or
 *   <li>an array of the above types
 * </ul>
 *
 * All methods in the protocol should throw only IOException. No field data of the protocol
 * instance is transmitted.
 */
@InterfaceAudience.LimitedPrivate(value = {"Common", "HDFS", "MapReduce", "Yarn"})
@InterfaceStability.Evolving
public class RPC {
  // Default RPC service class used when a caller does not specify one.
  static final int RPC_SERVICE_CLASS_DEFAULT = 0;

  /** Serialization engine of a call; the short value is what travels on the wire. */
  public enum RpcKind {
    RPC_BUILTIN((short) 1), // Used for built in calls by tests
    RPC_WRITABLE((short) 2), // Use WritableRpcEngine
    RPC_PROTOCOL_BUFFER((short) 3); // Use ProtobufRpcEngine
    static final short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
    public final short value; // TODO make it private

    RpcKind(short val) {
      this.value = val;
    }
  }

  interface RpcInvoker {
    /**
     * Process a client call on the server side
     *
     * @param server the server within whose context this rpc call is made
     * @param protocol - the protocol name (the class of the client proxy used to make calls to the
     *     rpc server.
     * @param rpcRequest - deserialized
     * @param receiveTime time at which the call received (for metrics)
     * @return the call's return
     * @throws IOException
     */
    public Writable call(Server server, String protocol, Writable rpcRequest, long receiveTime)
        throws Exception;
  }

  static final Log LOG = LogFactory.getLog(RPC.class);

  /**
   * Get all superInterfaces that extend VersionedProtocol
   *
   * @param childInterfaces
   * @return the super interfaces that extend VersionedProtocol
   */
  static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
    List<Class<?>> allInterfaces = new ArrayList<Class<?>>();

    for (Class<?> childInterface : childInterfaces) {
      if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
        // Recurse so transitively-extended interfaces are collected too.
        allInterfaces.add(childInterface);
        allInterfaces.addAll(Arrays.asList(getSuperInterfaces(childInterface.getInterfaces())));
      } else {
        LOG.warn(
            "Interface " + childInterface + " ignored because it does not extend VersionedProtocol");
      }
    }

    return allInterfaces.toArray(new Class[allInterfaces.size()]);
  }

  /**
   * Get all interfaces that the given protocol implements or extends which are assignable from
   * VersionedProtocol.
   */
  static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
    Class<?>[] interfaces = protocol.getInterfaces();
    return getSuperInterfaces(interfaces);
  }

  /**
   * Get the protocol name. If the protocol class has a ProtocolAnnotation, then get the protocol
   * name from the annotation; otherwise the class name is the protocol name.
   */
  public static String getProtocolName(Class<?> protocol) {
    if (protocol == null) {
      return null;
    }
    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
    return (anno == null) ? protocol.getName() : anno.protocolName();
  }

  /**
   * Get the protocol version from protocol class. If the protocol class has a ProtocolAnnotation,
   * then get the protocol name from the annotation; otherwise the class name is the protocol name.
   */
  public static long getProtocolVersion(Class<?> protocol) {
    if (protocol == null) {
      throw new IllegalArgumentException("Null protocol");
    }
    long version;
    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
    if (anno != null) {
      version = anno.protocolVersion();
      // -1 in the annotation means "unspecified": fall through to the field lookup.
      if (version != -1) return version;
    }
    try {
      // Convention: protocol interfaces expose a static long field "versionID".
      Field versionField = protocol.getField("versionID");
      versionField.setAccessible(true);
      return versionField.getLong(protocol);
    } catch (NoSuchFieldException ex) {
      throw new RuntimeException(ex);
    } catch (IllegalAccessException ex) {
      throw new RuntimeException(ex);
    }
  }

  private RPC() {} // no public ctor

  // cache of RpcEngines by protocol
  private static final Map<Class<?>, RpcEngine> PROTOCOL_ENGINES =
      new HashMap<Class<?>, RpcEngine>();

  private static final String ENGINE_PROP = "rpc.engine";

  /**
   * Set a protocol to use a non-default RpcEngine.
   *
   * @param conf configuration to use
   * @param protocol the protocol interface
   * @param engine the RpcEngine impl
   */
  public static void setProtocolEngine(Configuration conf, Class<?> protocol, Class<?> engine) {
    conf.setClass(ENGINE_PROP + "." + protocol.getName(), engine, RpcEngine.class);
  }

  // return the RpcEngine configured to handle a protocol
  // (synchronized: PROTOCOL_ENGINES is a plain HashMap shared across threads)
  static synchronized RpcEngine getProtocolEngine(Class<?> protocol, Configuration conf) {
    RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
    if (engine == null) {
      Class<?> impl =
          conf.getClass(ENGINE_PROP + "." + protocol.getName(), WritableRpcEngine.class);
      engine = (RpcEngine) ReflectionUtils.newInstance(impl, conf);
      PROTOCOL_ENGINES.put(protocol, engine);
    }
    return engine;
  }

  /** A version mismatch for the RPC protocol.
*/
public static class VersionMismatch extends RpcServerException {
  private static final long serialVersionUID = 0;

  private String interfaceName;
  private long clientVersion;
  private long serverVersion;

  /**
   * Create a version mismatch exception
   *
   * @param interfaceName the name of the protocol mismatch
   * @param clientVersion the client's version of the protocol
   * @param serverVersion the server's version of the protocol
   */
  public VersionMismatch(String interfaceName, long clientVersion, long serverVersion) {
    super(
        "Protocol "
            + interfaceName
            + " version mismatch. (client = "
            + clientVersion
            + ", server = "
            + serverVersion
            + ")");
    this.interfaceName = interfaceName;
    this.clientVersion = clientVersion;
    this.serverVersion = serverVersion;
  }

  /**
   * Get the interface name
   *
   * @return the java class name (eg. org.apache.hadoop.mapred.InterTrackerProtocol)
   */
  public String getInterfaceName() {
    return interfaceName;
  }

  /** Get the client's preferred version */
  public long getClientVersion() {
    return clientVersion;
  }

  /** Get the server's agreed to version. */
  public long getServerVersion() {
    return serverVersion;
  }

  /** get the rpc status corresponding to this exception */
  public RpcStatusProto getRpcStatusProto() {
    return RpcStatusProto.ERROR;
  }

  /** get the detailed rpc status corresponding to this exception */
  public RpcErrorCodeProto getRpcErrorCodeProto() {
    return RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH;
  }
}

/**
 * Get a proxy connection to a remote server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @return the proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> T waitForProxy(
    Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
    throws IOException {
  return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @return the protocol proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> waitForProtocolProxy(
    Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
    throws IOException {
  // No explicit timeout: wait effectively forever.
  return waitForProtocolProxy(protocol, clientVersion, addr, conf, Long.MAX_VALUE);
}

/**
 * Get a proxy connection to a remote server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param connTimeout time in milliseconds before giving up
 * @return the proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> T waitForProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    long connTimeout)
    throws IOException {
  return waitForProtocolProxy(protocol, clientVersion, addr, conf, connTimeout).getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param connTimeout time in milliseconds before giving up
 * @return the protocol proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> waitForProtocolProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    long connTimeout)
    throws IOException {
  return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
}

/**
 * Get a proxy connection to a remote server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param rpcTimeout timeout for each RPC
 * @param timeout time in milliseconds before giving up
 * @return the proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> T waitForProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    int rpcTimeout,
    long timeout)
    throws IOException {
  return waitForProtocolProxy(protocol, clientVersion, addr, conf, rpcTimeout, null, timeout)
      .getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server. Retries the connection until it succeeds or the overall
 * timeout elapses, sleeping one second between attempts.
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param rpcTimeout timeout for each RPC
 * @param timeout time in milliseconds before giving up
 * @return the proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> waitForProtocolProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    int rpcTimeout,
    RetryPolicy connectionRetryPolicy,
    long timeout)
    throws IOException {
  long startTime = Time.now();
  IOException ioe;
  while (true) {
    try {
      return getProtocolProxy(
          protocol,
          clientVersion,
          addr,
          UserGroupInformation.getCurrentUser(),
          conf,
          NetUtils.getDefaultSocketFactory(conf),
          rpcTimeout,
          connectionRetryPolicy);
    } catch (ConnectException se) { // namenode has not been started
      LOG.info("Server at " + addr + " not available yet, Zzzzz...");
      ioe = se;
    } catch (SocketTimeoutException te) { // namenode is busy
      LOG.info("Problem connecting to server: " + addr);
      ioe = te;
    } catch (NoRouteToHostException nrthe) { // perhaps a VIP is failing over
      LOG.info("No route to host for server: " + addr);
      ioe = nrthe;
    }
    // check if timed out
    // (algebraically equivalent to "elapsed >= timeout": now - timeout >= start)
    if (Time.now() - timeout >= startTime) {
      throw ioe;
    }

    // wait for retry
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      // IGNORE
      // NOTE(review): interrupt status is swallowed here; re-interrupting via
      // Thread.currentThread().interrupt() would be more conventional — left unchanged.
    }
  }
}

/**
 * Construct a client-side proxy object that implements the named protocol, talking to a server at
 * the named address.
* @param <T>
*/
public static <T> T getProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    SocketFactory factory)
    throws IOException {
  return getProtocolProxy(protocol, clientVersion, addr, conf, factory).getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param factory socket factory
 * @return the protocol proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    Configuration conf,
    SocketFactory factory)
    throws IOException {
  // Defaults the ticket to the current user's UGI.
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
}

/**
 * Construct a client-side proxy object that implements the named protocol, talking to a server at
 * the named address.
 *
 * @param <T>
 */
public static <T> T getProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    UserGroupInformation ticket,
    Configuration conf,
    SocketFactory factory)
    throws IOException {
  return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory).getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param ticket user group information
 * @param conf configuration to use
 * @param factory socket factory
 * @return the protocol proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    UserGroupInformation ticket,
    Configuration conf,
    SocketFactory factory)
    throws IOException {
  // rpcTimeout 0 means no per-call timeout; null retry policy uses the engine default.
  return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, 0, null);
}

/**
 * Construct a client-side proxy that implements the named protocol, talking to a server at the
 * named address.
 *
 * @param <T>
 * @param protocol protocol
 * @param clientVersion client's version
 * @param addr server address
 * @param ticket security ticket
 * @param conf configuration
 * @param factory socket factory
 * @param rpcTimeout max time for each rpc; 0 means no timeout
 * @return the proxy
 * @throws IOException if any error occurs
 */
public static <T> T getProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    UserGroupInformation ticket,
    Configuration conf,
    SocketFactory factory,
    int rpcTimeout)
    throws IOException {
  return getProtocolProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, null)
      .getProxy();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server. This is the overload all other getProxy/getProtocolProxy
 * variants ultimately delegate to.
 *
 * @param protocol protocol
 * @param clientVersion client's version
 * @param addr server address
 * @param ticket security ticket
 * @param conf configuration
 * @param factory socket factory
 * @param rpcTimeout max time for each rpc; 0 means no timeout
 * @return the proxy
 * @throws IOException if any error occurs
 */
public static <T> ProtocolProxy<T> getProtocolProxy(
    Class<T> protocol,
    long clientVersion,
    InetSocketAddress addr,
    UserGroupInformation ticket,
    Configuration conf,
    SocketFactory factory,
    int rpcTimeout,
    RetryPolicy connectionRetryPolicy)
    throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    SaslRpcServer.init(conf);
  }
  return getProtocolEngine(protocol, conf)
      .getProxy(
          protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy);
}

/**
 * Construct a client-side proxy object with the default SocketFactory
 *
 * @param <T>
 * @param protocol
 * @param clientVersion
 * @param addr
 * @param conf
 * @return a proxy instance
 * @throws IOException
 */
public static <T> T getProxy(
    Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
    throws IOException {
  return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}

/**
Returns the server address for a given proxy. */
public static InetSocketAddress getServerAddress(Object proxy) {
  return getConnectionIdForProxy(proxy).getAddress();
}

/**
 * Return the connection ID of the given object. If the provided object is in fact a protocol
 * translator, we'll get the connection ID of the underlying proxy object.
 *
 * @param proxy the proxy object to get the connection ID of.
 * @return the connection ID for the provided proxy object.
 */
public static ConnectionId getConnectionIdForProxy(Object proxy) {
  if (proxy instanceof ProtocolTranslator) {
    // Unwrap the translator to reach the real dynamic proxy.
    proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject();
  }
  RpcInvocationHandler inv = (RpcInvocationHandler) Proxy.getInvocationHandler(proxy);
  return inv.getConnectionId();
}

/**
 * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
 * that are supported by the server
 *
 * @param protocol
 * @param clientVersion
 * @param addr
 * @param conf
 * @return a protocol proxy
 * @throws IOException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(
    Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf)
    throws IOException {
  return getProtocolProxy(
      protocol, clientVersion, addr, conf, NetUtils.getDefaultSocketFactory(conf));
}

/**
 * Stop the proxy. Proxy must either implement {@link Closeable} or must have associated {@link
 * RpcInvocationHandler}.
 *
 * @param proxy the RPC proxy object to be stopped
 * @throws HadoopIllegalArgumentException if the proxy does not implement {@link Closeable}
 *     interface or does not have closeable {@link InvocationHandler}
 */
public static void stopProxy(Object proxy) {
  if (proxy == null) {
    throw new HadoopIllegalArgumentException("Cannot close proxy since it is null");
  }
  try {
    if (proxy instanceof Closeable) {
      ((Closeable) proxy).close();
      return;
    } else {
      // Fall back to the invocation handler: dynamic proxies themselves are not
      // Closeable, but their handler usually is.
      InvocationHandler handler = Proxy.getInvocationHandler(proxy);
      if (handler instanceof Closeable) {
        ((Closeable) handler).close();
        return;
      }
    }
  } catch (IOException e) {
    LOG.error("Closing proxy or invocation handler caused exception", e);
  } catch (IllegalArgumentException e) {
    LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
  }

  // If you see this error on a mock object in a unit test you're
  // developing, make sure to use MockitoUtil.mockProtocol() to
  // create your mock.
  throw new HadoopIllegalArgumentException(
      "Cannot close proxy - is not Closeable or "
          + "does not provide closeable invocation handler "
          + proxy.getClass());
}

/** Class to construct instances of RPC server with specific options. */
public static class Builder {
  private Class<?> protocol = null;
  private Object instance = null;
  private String bindAddress = "0.0.0.0";
  private int port = 0;
  private int numHandlers = 1;
  private int numReaders = -1;
  private int queueSizePerHandler = -1;
  private boolean verbose = false;
  private final Configuration conf;
  private SecretManager<? extends TokenIdentifier> secretManager = null;
  private String portRangeConfig = null;

  public Builder(Configuration conf) {
    this.conf = conf;
  }

  /** Mandatory field */
  public Builder setProtocol(Class<?> protocol) {
    this.protocol = protocol;
    return this;
  }

  /** Mandatory field */
  public Builder setInstance(Object instance) {
    this.instance = instance;
    return this;
  }

  /** Default: 0.0.0.0 */
  public Builder setBindAddress(String bindAddress) {
    this.bindAddress = bindAddress;
    return this;
  }

  /** Default: 0 */
  public Builder setPort(int port) {
    this.port = port;
    return this;
  }

  /** Default: 1 */
  public Builder setNumHandlers(int numHandlers) {
    this.numHandlers = numHandlers;
    return this;
  }

  /** Default: -1 */
  public Builder setnumReaders(int numReaders) {
    this.numReaders = numReaders;
    return this;
  }

  /** Default: -1 */
  public Builder setQueueSizePerHandler(int queueSizePerHandler) {
    this.queueSizePerHandler = queueSizePerHandler;
    return this;
  }

  /** Default: false */
  public Builder setVerbose(boolean verbose) {
    this.verbose = verbose;
    return this;
  }

  /** Default: null */
  public Builder setSecretManager(SecretManager<? extends TokenIdentifier> secretManager) {
    this.secretManager = secretManager;
    return this;
  }

  /** Default: null */
  public Builder setPortRangeConfig(String portRangeConfig) {
    this.portRangeConfig = portRangeConfig;
    return this;
  }

  /**
   * Build the RPC Server.
*
* @throws IOException on error
* @throws HadoopIllegalArgumentException when mandatory fields are not set
*/
public Server build() throws IOException, HadoopIllegalArgumentException {
  if (this.conf == null) {
    throw new HadoopIllegalArgumentException("conf is not set");
  }
  if (this.protocol == null) {
    throw new HadoopIllegalArgumentException("protocol is not set");
  }
  if (this.instance == null) {
    throw new HadoopIllegalArgumentException("instance is not set");
  }
  // Delegate server construction to the engine configured for this protocol.
  return getProtocolEngine(this.protocol, this.conf)
      .getServer(
          this.protocol,
          this.instance,
          this.bindAddress,
          this.port,
          this.numHandlers,
          this.numReaders,
          this.queueSizePerHandler,
          this.verbose,
          this.conf,
          this.secretManager,
          this.portRangeConfig);
}
}

/** An RPC Server. */
public abstract static class Server extends org.apache.hadoop.ipc.Server {
  boolean verbose;

  // Strips the package prefix: "a.b.C" -> "C".
  static String classNameBase(String className) {
    String[] names = className.split("\\.", -1);
    if (names == null || names.length == 0) {
      return className;
    }
    return names[names.length - 1];
  }

  /** Store a map of protocol and version to its implementation */
  /** The key in Map */
  static class ProtoNameVer {
    final String protocol;
    final long version;

    ProtoNameVer(String protocol, long ver) {
      this.protocol = protocol;
      this.version = ver;
    }

    @Override
    public boolean equals(Object o) {
      if (o == null) return false;
      if (this == o) return true;
      if (!(o instanceof ProtoNameVer)) return false;
      ProtoNameVer pv = (ProtoNameVer) o;
      return ((pv.protocol.equals(this.protocol)) && (pv.version == this.version));
    }

    @Override
    public int hashCode() {
      return protocol.hashCode() * 37 + (int) version;
    }
  }

  /** The value in map */
  static class ProtoClassProtoImpl {
    final Class<?> protocolClass;
    final Object protocolImpl;

    ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
      this.protocolClass = protocolClass;
      this.protocolImpl = protocolImpl;
    }
  }

  // One registry map per RpcKind, indexed by RpcKind.ordinal().
  ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray =
      new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);

  Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RPC.RpcKind rpcKind) {
    if (protocolImplMapArray.size() == 0) { // initialize for all rpc kinds
      // Lazily create one map slot per kind (<= MAX_INDEX covers every ordinal).
      for (int i = 0; i <= RpcKind.MAX_INDEX; ++i) {
        protocolImplMapArray.add(new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10));
      }
    }
    return protocolImplMapArray.get(rpcKind.ordinal());
  }

  // Register protocol and its impl for rpc calls
  void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) {
    String protocolName = RPC.getProtocolName(protocolClass);
    long version;

    try {
      version = RPC.getProtocolVersion(protocolClass);
    } catch (Exception ex) {
      // A protocol without a resolvable version cannot be registered; skip it.
      LOG.warn("Protocol " + protocolClass + " NOT registered as cannot get protocol version ");
      return;
    }

    getProtocolImplMap(rpcKind)
        .put(
            new ProtoNameVer(protocolName, version),
            new ProtoClassProtoImpl(protocolClass, protocolImpl));
    LOG.debug(
        "RpcKind = "
            + rpcKind
            + " Protocol Name = "
            + protocolName
            + " version="
            + version
            + " ProtocolImpl="
            + protocolImpl.getClass().getName()
            + " protocolClass="
            + protocolClass.getName());
  }

  static class VerProtocolImpl {
    final long version;
    final ProtoClassProtoImpl protocolTarget;

    VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
      this.version = ver;
      this.protocolTarget = protocolTarget;
    }
  }

  // Collects every registered (version, impl) pair for the named protocol,
  // or null when none are registered.
  VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind, String protocolName) {
    VerProtocolImpl[] resultk = new VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
    int i = 0;
    for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
        getProtocolImplMap(rpcKind).entrySet()) {
      if (pv.getKey().protocol.equals(protocolName)) {
        resultk[i++] = new VerProtocolImpl(pv.getKey().version, pv.getValue());
      }
    }
    if (i == 0) {
      return null;
    }
    // Trim the over-allocated scratch array to the actual match count.
    VerProtocolImpl[] result = new VerProtocolImpl[i];
    System.arraycopy(resultk, 0, result, 0, i);
    return result;
  }

  // Returns the highest registered version of the named protocol, or null.
  VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind, String protocolName) {
    Long highestVersion = 0L;
    ProtoClassProtoImpl highest = null;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Size of protoMap for " + rpcKind + " =" + getProtocolImplMap(rpcKind).size());
    }
    for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
        getProtocolImplMap(rpcKind).entrySet()) {
      if (pv.getKey().protocol.equals(protocolName)) {
        // Boxed Long vs primitive long comparison: auto-unboxed, safe here.
        if ((highest == null) || (pv.getKey().version > highestVersion)) {
          highest = pv.getValue();
          highestVersion = pv.getKey().version;
        }
      }
    }
    if (highest == null) {
      return null;
    }
    return new VerProtocolImpl(highestVersion, highest);
  }

  protected Server(
      String bindAddress,
      int port,
      Class<? extends Writable> paramClass,
      int handlerCount,
      int numReaders,
      int queueSizePerHandler,
      Configuration conf,
      String serverName,
      SecretManager<? extends TokenIdentifier> secretManager,
      String portRangeConfig)
      throws IOException {
    super(
        bindAddress,
        port,
        paramClass,
        handlerCount,
        numReaders,
        queueSizePerHandler,
        conf,
        serverName,
        secretManager,
        portRangeConfig);
    initProtocolMetaInfo(conf);
  }

  // Registers the built-in protobuf meta-info service so clients can query
  // which protocols/versions this server supports.
  private void initProtocolMetaInfo(Configuration conf) {
    RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, ProtobufRpcEngine.class);
    ProtocolMetaInfoServerSideTranslatorPB xlator =
        new ProtocolMetaInfoServerSideTranslatorPB(this);
    BlockingService protocolInfoBlockingService =
        ProtocolInfoService.newReflectiveBlockingService(xlator);
    addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, ProtocolMetaInfoPB.class, protocolInfoBlockingService);
  }

  /**
   * Add a protocol to the existing server.
   *
   * @param protocolClass - the protocol class
   * @param protocolImpl - the impl of the protocol that will be called
   * @return the server (for convenience)
   */
  public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) {
    registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
    return this;
  }

  @Override
  public Writable call(RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime)
      throws Exception {
    // Dispatch to the invoker registered for this rpc kind.
    return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime);
  }
}
}
/** * To generate automatically reports from list mode. * * <p>Uses JasperReports. * * @author Javier Paniza */ public class GenerateReportServlet extends HttpServlet { private static Log log = LogFactory.getLog(GenerateReportServlet.class); public static class TableModelDecorator implements TableModel { private TableModel original; private List metaProperties; private boolean withValidValues = false; private Locale locale; private boolean labelAsHeader = false; private HttpServletRequest request; private boolean format = false; // format or no the values. If format = true, all values to the report are String private Integer columnCountLimit; public TableModelDecorator( HttpServletRequest request, TableModel original, List metaProperties, Locale locale, boolean labelAsHeader, boolean format, Integer columnCountLimit) throws Exception { this.request = request; this.original = original; this.metaProperties = metaProperties; this.locale = locale; this.withValidValues = calculateWithValidValues(); this.labelAsHeader = labelAsHeader; this.format = format; this.columnCountLimit = columnCountLimit; } private boolean calculateWithValidValues() { Iterator it = metaProperties.iterator(); while (it.hasNext()) { MetaProperty m = (MetaProperty) it.next(); if (m.hasValidValues()) return true; } return false; } private MetaProperty getMetaProperty(int i) { return (MetaProperty) metaProperties.get(i); } public int getRowCount() { return original.getRowCount(); } public int getColumnCount() { return columnCountLimit == null ? original.getColumnCount() : columnCountLimit; } public String getColumnName(int c) { return labelAsHeader ? 
getMetaProperty(c).getLabel(locale) : Strings.change(getMetaProperty(c).getQualifiedName(), ".", "_"); } public Class getColumnClass(int c) { return original.getColumnClass(c); } public boolean isCellEditable(int row, int column) { return original.isCellEditable(row, column); } public Object getValueAt(int row, int column) { if (isFormat()) return getValueWithWebEditorsFormat(row, column); else return getValueWithoutWebEditorsFormat(row, column); } private Object getValueWithoutWebEditorsFormat(int row, int column) { Object r = original.getValueAt(row, column); if (r instanceof Boolean) { if (((Boolean) r).booleanValue()) return XavaResources.getString(locale, "yes"); return XavaResources.getString(locale, "no"); } if (withValidValues) { MetaProperty p = getMetaProperty(column); if (p.hasValidValues()) { return p.getValidValueLabel(locale, original.getValueAt(row, column)); } } if (r instanceof java.util.Date) { MetaProperty p = getMetaProperty(column); // In order to use the type declared by the developer // and not the one returned by JDBC or the JPA engine if (java.sql.Time.class.isAssignableFrom(p.getType())) { return DateFormat.getTimeInstance(DateFormat.SHORT, locale).format(r); } if (java.sql.Timestamp.class.isAssignableFrom(p.getType())) { DateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm:ss"); return dateFormat.format(r); } return DateFormat.getDateInstance(DateFormat.SHORT, locale).format(r); } if (r instanceof BigDecimal) { return formatBigDecimal(r, locale); } return r; } private Object getValueWithWebEditorsFormat(int row, int column) { Object r = original.getValueAt(row, column); MetaProperty metaProperty = getMetaProperty(column); String result = WebEditors.format(this.request, metaProperty, r, null, "", true); if (isHtml(result)) { // this avoids that the report shows html content result = WebEditors.format(this.request, metaProperty, r, null, "", false); } return result; } public void setValueAt(Object value, int row, int column) { 
original.setValueAt(value, row, column); } public void addTableModelListener(TableModelListener l) { original.addTableModelListener(l); } public void removeTableModelListener(TableModelListener l) { original.removeTableModelListener(l); } private boolean isHtml(String value) { return value.matches("<.*>"); } public boolean isFormat() { return format; } public void setFormat(boolean format) { this.format = format; } } protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { try { Locales.setCurrent(request); if (Users.getCurrent() == null) { // for a bug in websphere portal 5.1 with Domino LDAP Users.setCurrent((String) request.getSession().getAttribute("xava.user")); } request.getParameter("application"); // for a bug in websphere 5.1 request.getParameter("module"); // for a bug in websphere 5.1 Tab tab = (Tab) request.getSession().getAttribute("xava_reportTab"); int[] selectedRowsNumber = (int[]) request.getSession().getAttribute("xava_selectedRowsReportTab"); Map[] selectedKeys = (Map[]) request.getSession().getAttribute("xava_selectedKeysReportTab"); int[] selectedRows = getSelectedRows(selectedRowsNumber, selectedKeys, tab); request.getSession().removeAttribute("xava_selectedRowsReportTab"); Integer columnCountLimit = (Integer) request.getSession().getAttribute("xava_columnCountLimitReportTab"); request.getSession().removeAttribute("xava_columnCountLimitReportTab"); setDefaultSchema(request); String user = (String) request.getSession().getAttribute("xava_user"); request.getSession().removeAttribute("xava_user"); Users.setCurrent(user); String uri = request.getRequestURI(); if (uri.endsWith(".pdf")) { InputStream is; JRDataSource ds; Map parameters = new HashMap(); synchronized (tab) { tab.setRequest(request); parameters.put("Title", tab.getTitle()); parameters.put("Organization", getOrganization()); parameters.put("Date", getCurrentDate()); for (String totalProperty : tab.getTotalPropertiesNames()) { 
parameters.put(totalProperty + "__TOTAL__", getTotal(request, tab, totalProperty)); } TableModel tableModel = getTableModel(request, tab, selectedRows, false, true, null); tableModel.getValueAt(0, 0); if (tableModel.getRowCount() == 0) { generateNoRowsPage(response); return; } is = getReport(request, response, tab, tableModel, columnCountLimit); ds = new JRTableModelDataSource(tableModel); } JasperPrint jprint = JasperFillManager.fillReport(is, parameters, ds); response.setContentType("application/pdf"); response.setHeader( "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".pdf\""); JasperExportManager.exportReportToPdfStream(jprint, response.getOutputStream()); } else if (uri.endsWith(".csv")) { String csvEncoding = XavaPreferences.getInstance().getCSVEncoding(); if (!Is.emptyString(csvEncoding)) { response.setCharacterEncoding(csvEncoding); } response.setContentType("text/x-csv"); response.setHeader( "Content-Disposition", "inline; filename=\"" + getFileName(tab) + ".csv\""); synchronized (tab) { tab.setRequest(request); response .getWriter() .print( TableModels.toCSV( getTableModel(request, tab, selectedRows, true, false, columnCountLimit))); } } else { throw new ServletException( XavaResources.getString("report_type_not_supported", "", ".pdf .csv")); } } catch (Exception ex) { log.error(ex.getMessage(), ex); throw new ServletException(XavaResources.getString("report_error")); } finally { request.getSession().removeAttribute("xava_reportTab"); } } private void generateNoRowsPage(HttpServletResponse response) throws Exception { response.setContentType("text/html"); response.getWriter().println("<html><head><title>"); response.getWriter().println(XavaResources.getString("no_rows_report_message_title")); response .getWriter() .println( "</title></head><body style='font-family:Tahoma,Arial,sans-serif;color:black;background-color:white;'>"); response.getWriter().println("<h1 style='font-size:22px;'>"); 
response.getWriter().println(XavaResources.getString("no_rows_report_message_title")); response.getWriter().println("</h1>"); response.getWriter().println("<p style='font-size:16px;'>"); response.getWriter().println(XavaResources.getString("no_rows_report_message_detail")); response.getWriter().println("</p></body></html>"); } private String getCurrentDate() { return java.text.DateFormat.getDateInstance(DateFormat.MEDIUM, Locales.getCurrent()) .format(new java.util.Date()); } private String getFileName(Tab tab) { String now = new SimpleDateFormat("yyyyMMdd_HHmm").format(new Date()); return tab.getTitle() + " " + now; } private Object getTotal(HttpServletRequest request, Tab tab, String totalProperty) { Object total = tab.getTotal(totalProperty); return WebEditors.format( request, tab.getMetaProperty(totalProperty), total, new Messages(), null, true); } private void setDefaultSchema(HttpServletRequest request) { String hibernateDefaultSchemaTab = (String) request.getSession().getAttribute("xava_hibernateDefaultSchemaTab"); if (hibernateDefaultSchemaTab != null) { request.getSession().removeAttribute("xava_hibernateDefaultSchemaTab"); XHibernate.setDefaultSchema(hibernateDefaultSchemaTab); } String jpaDefaultSchemaTab = (String) request.getSession().getAttribute("xava_jpaDefaultSchemaTab"); if (jpaDefaultSchemaTab != null) { request.getSession().removeAttribute("xava_jpaDefaultSchemaTab"); XPersistence.setDefaultSchema(jpaDefaultSchemaTab); } } protected String getOrganization() throws MissingResourceException, XavaException { return ReportParametersProviderFactory.getInstance().getOrganization(); } private InputStream getReport( HttpServletRequest request, HttpServletResponse response, Tab tab, TableModel tableModel, Integer columnCountLimit) throws ServletException, IOException { StringBuffer suri = new StringBuffer(); suri.append("/xava/jasperReport"); suri.append("?language="); suri.append(Locales.getCurrent().getLanguage()); suri.append("&widths="); 
suri.append(Arrays.toString(getWidths(tableModel))); if (columnCountLimit != null) { suri.append("&columnCountLimit="); suri.append(columnCountLimit); } response.setCharacterEncoding(XSystem.getEncoding()); return Servlets.getURIAsStream(request, response, suri.toString()); } private int[] getWidths(TableModel tableModel) { int[] widths = new int[tableModel.getColumnCount()]; for (int r = 0; r < Math.min(tableModel.getRowCount(), 500); r++) { // 500 is not for performance, but for using only a sample of data with huge table for (int c = 0; c < tableModel.getColumnCount(); c++) { Object o = tableModel.getValueAt(r, c); if (o instanceof String) { String s = ((String) o).trim(); if (s.length() > widths[c]) widths[c] = s.length(); } } } return widths; } private TableModel getTableModel( HttpServletRequest request, Tab tab, int[] selectedRows, boolean labelAsHeader, boolean format, Integer columnCountLimit) throws Exception { TableModel data = null; if (selectedRows != null && selectedRows.length > 0) { data = new SelectedRowsXTableModel(tab.getTableModel(), selectedRows); } else { data = tab.getAllDataTableModel(); } return new TableModelDecorator( request, data, tab.getMetaProperties(), Locales.getCurrent(), labelAsHeader, format, columnCountLimit); } private static Object formatBigDecimal(Object number, Locale locale) { NumberFormat nf = NumberFormat.getNumberInstance(locale); nf.setMinimumFractionDigits(2); return nf.format(number); } private int[] getSelectedRows(int[] selectedRowsNumber, Map[] selectedRowsKeys, Tab tab) { if (selectedRowsKeys == null || selectedRowsKeys.length == 0) return new int[0]; // selectedRowsNumber is the most performant so we use it when possible else if (selectedRowsNumber.length == selectedRowsKeys.length) return selectedRowsNumber; else { // find the rows from the selectedKeys // This has a poor performance, but it covers the case when the selected // rows are not loaded for the tab, something that can occurs if the user // select rows 
and afterwards reorder the list. try { int[] s = new int[selectedRowsKeys.length]; List selectedKeys = Arrays.asList(selectedRowsKeys); int end = tab.getTableModel().getTotalSize(); int x = 0; for (int i = 0; i < end; i++) { Map key = (Map) tab.getTableModel().getObjectAt(i); if (selectedKeys.contains(key)) { s[x] = i; x++; } } return s; } catch (Exception ex) { log.warn(XavaResources.getString("fails_selected"), ex); throw new XavaException("fails_selected"); } } } }