/**
 * Checks that a native filter inside a calculated member, evaluated under a
 * compound slicer, reads from the aggregate table and pushes the filter
 * condition into the HAVING clause, and that the cell values are correct.
 */
public void testNativeFilterWithCompoundSlicerWithAggs() {
    // Force aggregate-table use and formatted SQL so the literal pattern
    // below matches the generated statement exactly.
    propSaver.set(MondrianProperties.instance().UseAggregates, true);
    propSaver.set(MondrianProperties.instance().ReadAggregates, true);
    propSaver.set(MondrianProperties.instance().GenerateFormattedSql, true);
    final String mdx =
        "with member measures.avgQtrs as 'avg( filter( time.quarter.members, measures.[unit sales] > 80))' "
        + "select measures.avgQtrs * gender.members on 0 from sales where head( product.[product name].members, 3)";
    // The SQL assertion is only meaningful when both native filter and
    // native non-empty evaluation are enabled.
    if (MondrianProperties.instance().EnableNativeFilter.get()
        && MondrianProperties.instance().EnableNativeNonEmpty.get())
    {
        final String sqlMysql =
            "select\n"
            + " `agg_c_14_sales_fact_1997`.`the_year` as `c0`,\n"
            + " `agg_c_14_sales_fact_1997`.`quarter` as `c1`\n"
            + "from\n"
            + " `agg_c_14_sales_fact_1997` as `agg_c_14_sales_fact_1997`,\n"
            + " `product` as `product`,\n"
            + " `customer` as `customer`\n"
            + "where\n"
            + " `agg_c_14_sales_fact_1997`.`product_id` = `product`.`product_id`\n"
            + "and\n"
            + " `product`.`product_name` in ('Good Imported Beer', 'Good Light Beer', 'Pearl Imported Beer')\n"
            + "and\n"
            + " `agg_c_14_sales_fact_1997`.`customer_id` = `customer`.`customer_id`\n"
            + "and\n"
            + " `customer`.`gender` = 'M'\n"
            + "group by\n"
            + " `agg_c_14_sales_fact_1997`.`the_year`,\n"
            + " `agg_c_14_sales_fact_1997`.`quarter`\n"
            + "having\n"
            + " (sum(`agg_c_14_sales_fact_1997`.`unit_sales`) > 80)\n"
            + "order by\n"
            // Some dialect configurations order by the select aliases
            // rather than the underlying expressions.
            + (TestContext.instance().getDialect().requiresOrderByAlias()
                ? " ISNULL(`c0`) ASC, `c0` ASC,\n"
                  + " ISNULL(`c1`) ASC, `c1` ASC"
                : " ISNULL(`agg_c_14_sales_fact_1997`.`the_year`) ASC, `agg_c_14_sales_fact_1997`.`the_year` ASC,\n"
                  + " ISNULL(`agg_c_14_sales_fact_1997`.`quarter`) ASC, `agg_c_14_sales_fact_1997`.`quarter` ASC");
        final SqlPattern[] patterns = mysqlPattern(sqlMysql);
        // Make sure the tuples list is using the HAVING clause.
        assertQuerySqlOrNot(getTestContext(), mdx, patterns, false, true, true);
    }
    // Make sure the numbers are right
    assertQueryReturns(
        mdx,
        "Axis #0:\n"
        + "{[Product].[Drink].[Alcoholic Beverages].[Beer and Wine].[Beer].[Good].[Good Imported Beer]}\n"
        + "{[Product].[Drink].[Alcoholic Beverages].[Beer and Wine].[Beer].[Good].[Good Light Beer]}\n"
        + "{[Product].[Drink].[Alcoholic Beverages].[Beer and Wine].[Beer].[Pearl].[Pearl Imported Beer]}\n"
        + "Axis #1:\n"
        + "{[Measures].[avgQtrs], [Gender].[All Gender]}\n"
        + "{[Measures].[avgQtrs], [Gender].[F]}\n"
        + "{[Measures].[avgQtrs], [Gender].[M]}\n"
        + "Row #0: 111\n"
        + "Row #0: \n"
        + "Row #0: \n");
}
/**
 * Creates a SegmentCacheManager for the given server.
 *
 * <p>Starts the actor on a daemon thread (so cache requests are serialized
 * without blocking JVM shutdown), creates the segment index registry, and
 * assembles the cache workers: an in-JVM {@link MemorySegmentCache} unless
 * caching is disabled, plus one worker per configured external cache.
 *
 * @param server Mondrian server that owns this cache manager
 */
public SegmentCacheManager(MondrianServer server) {
    this.server = server;
    ACTOR = new Actor();
    thread = new Thread(ACTOR, "mondrian.rolap.agg.SegmentCacheManager$ACTOR");
    thread.setDaemon(true);
    thread.start();
    // Create the index registry.
    this.indexRegistry = new SegmentCacheIndexRegistry();
    // Add a local cache, if needed.
    if (!MondrianProperties.instance().DisableCaching.get()) {
        final MemorySegmentCache cache = new MemorySegmentCache();
        segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread));
    }
    // Add an external cache, if configured.
    final List<SegmentCache> externalCache = SegmentCacheWorker.initCache();
    for (SegmentCache cache : externalCache) {
        // Create a worker for this external cache
        segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread));
        // Hook up a listener so it can update
        // the segment index.
        cache.addListener(new AsyncCacheListener(this, server));
    }
    // Facade that fans reads and writes out over all registered workers.
    compositeCache = new CompositeSegmentCache(segmentCacheWorkers);
}
/**
 * Verifies that a connection can obtain its {@link DataSource} via JNDI:
 * installs a mock InitialContext that records lookups and returns a known
 * data source, connects using the {@code DataSource} connect-string
 * property, and asserts that the JNDI lookup was actually invoked.
 *
 * @throws NamingException on JNDI failure (not expected)
 */
public void testJndiConnection() throws NamingException {
    // Cannot guarantee that this test will work if they have chosen to
    // resolve data sources other than by JNDI.
    if (MondrianProperties.instance().DataSourceResolverClass.isSet()) {
        return;
    }
    // get a regular connection
    Util.PropertyList properties =
        TestContext.instance().getConnectionProperties().clone();
    final StringBuilder buf = new StringBuilder();
    final DataSource dataSource =
        RolapConnection.createDataSource(null, properties, buf);
    // Don't know what the connect string is - it differs with database
    // and with the user's set up - but we know that it contains a JDBC
    // connect string. Best we can do is check that createDataSource is
    // setting it to something.
    final String desc = buf.toString();
    assertTrue(desc, desc.startsWith("Jdbc="));
    final List<String> lookupCalls = new ArrayList<String>();
    // mock the JNDI naming manager to provide that datasource
    THREAD_INITIAL_CONTEXT.set(
        // Use lazy initialization. Otherwise during initialization of this
        // initial context JNDI tries to create a default initial context
        // and bumps into itself coming the other way.
        new InitialContext(true) {
            public Object lookup(String str) {
                lookupCalls.add("Called");
                return dataSource;
            }
        });
    // Use the datasource property to connect to the database.
    // Remove user and password, because some data sources (those using
    // pools) don't allow you to override user.
    Util.PropertyList properties2 =
        TestContext.instance().getConnectionProperties().clone();
    properties2.remove(RolapConnectionProperties.Jdbc.name());
    properties2.remove(RolapConnectionProperties.JdbcUser.name());
    properties2.remove(RolapConnectionProperties.JdbcPassword.name());
    properties2.put(RolapConnectionProperties.DataSource.name(), "jnditest");
    DriverManager.getConnection(properties2, null);
    // if we've made it here with lookupCalls,
    // we've successfully used JNDI
    assertTrue(lookupCalls.size() > 0);
}
/**
 * Checks that a negated Matches() filter over customer names executes
 * natively, pushing the regular-expression test into the HAVING clause
 * (NOT REGEXP_LIKE on Oracle, NOT ~ on Postgres, NOT ... REGEXP on MySQL),
 * and that no matching customer appears in the result.
 *
 * @throws Exception on error
 */
public void testNegativeMatching() throws Exception {
    if (!MondrianProperties.instance().EnableNativeFilter.get()) {
        // No point testing these if the native filters
        // are turned off.
        return;
    }
    // Expected SQL, one statement per supported dialect.
    final String sqlOracle =
        "select \"customer\".\"country\" as \"c0\", \"customer\".\"state_province\" as \"c1\", \"customer\".\"city\" as \"c2\", \"customer\".\"customer_id\" as \"c3\", \"fname\" || ' ' || \"lname\" as \"c4\", \"fname\" || ' ' || \"lname\" as \"c5\", \"customer\".\"gender\" as \"c6\", \"customer\".\"marital_status\" as \"c7\", \"customer\".\"education\" as \"c8\", \"customer\".\"yearly_income\" as \"c9\" from \"customer\" \"customer\" group by \"customer\".\"country\", \"customer\".\"state_province\", \"customer\".\"city\", \"customer\".\"customer_id\", \"fname\" || ' ' || \"lname\", \"customer\".\"gender\", \"customer\".\"marital_status\", \"customer\".\"education\", \"customer\".\"yearly_income\" having NOT(REGEXP_LIKE(\"fname\" || ' ' || \"lname\", '.*jeanne.*', 'i')) order by \"customer\".\"country\" ASC NULLS LAST, \"customer\".\"state_province\" ASC NULLS LAST, \"customer\".\"city\" ASC NULLS LAST, \"fname\" || ' ' || \"lname\" ASC NULLS LAST";
    final String sqlPgsql =
        "select \"customer\".\"country\" as \"c0\", \"customer\".\"state_province\" as \"c1\", \"customer\".\"city\" as \"c2\", \"customer\".\"customer_id\" as \"c3\", fullname as \"c4\", fullname as \"c5\", \"customer\".\"gender\" as \"c6\", \"customer\".\"marital_status\" as \"c7\", \"customer\".\"education\" as \"c8\", \"customer\".\"yearly_income\" as \"c9\" from \"customer\" as \"customer\" group by \"customer\".\"country\", \"customer\".\"state_province\", \"customer\".\"city\", \"customer\".\"customer_id\", fullname, \"customer\".\"gender\", \"customer\".\"marital_status\", \"customer\".\"education\", \"customer\".\"yearly_income\" having NOT(cast(fullname as text) ~ '(?i).*jeanne.*') order by \"customer\".\"country\" ASC NULLS LAST, \"customer\".\"state_province\" ASC NULLS LAST, \"customer\".\"city\" ASC NULLS LAST, fullname ASC NULLS LAST";
    final String sqlMysql =
        "select `customer`.`country` as `c0`, `customer`.`state_province` as `c1`, `customer`.`city` as `c2`, `customer`.`customer_id` as `c3`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c4`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c5`, `customer`.`gender` as `c6`, `customer`.`marital_status` as `c7`, `customer`.`education` as `c8`, `customer`.`yearly_income` as `c9` from `customer` as `customer` group by `customer`.`country`, `customer`.`state_province`, `customer`.`city`, `customer`.`customer_id`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`), `customer`.`gender`, `customer`.`marital_status`, `customer`.`education`, `customer`.`yearly_income` having NOT(UPPER(c5) REGEXP '.*JEANNE.*') order by "
        // Some dialect configurations order by the select aliases rather
        // than the underlying expressions.
        + (TestContext.instance().getDialect().requiresOrderByAlias()
            ? "ISNULL(`c0`) ASC, `c0` ASC, "
              + "ISNULL(`c1`) ASC, `c1` ASC, "
              + "ISNULL(`c2`) ASC, `c2` ASC, "
              + "ISNULL(`c4`) ASC, `c4` ASC"
            : "ISNULL(`customer`.`country`) ASC, `customer`.`country` ASC, ISNULL(`customer`.`state_province`) ASC, `customer`.`state_province` ASC, ISNULL(`customer`.`city`) ASC, `customer`.`city` ASC, ISNULL(CONCAT(`customer`.`fname`, ' ', `customer`.`lname`)) ASC, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) ASC");
    SqlPattern[] patterns = {
        new SqlPattern(Dialect.DatabaseProduct.ORACLE, sqlOracle, sqlOracle.length()),
        new SqlPattern(Dialect.DatabaseProduct.MYSQL, sqlMysql, sqlMysql.length()),
        new SqlPattern(Dialect.DatabaseProduct.POSTGRESQL, sqlPgsql, sqlPgsql.length())
    };
    final String query =
        "With\n"
        + "Set [*NATIVE_CJ_SET] as 'Filter([*BASE_MEMBERS_Customers], Not IsEmpty ([Measures].[Unit Sales]))'\n"
        + "Set [*SORTED_COL_AXIS] as 'Order([*CJ_COL_AXIS],[Customers].CurrentMember.OrderKey,BASC,Ancestor([Customers].CurrentMember,[Customers].[City]).OrderKey,BASC)'\n"
        + "Set [*BASE_MEMBERS_Customers] as 'Filter([Customers].[Name].Members,[Customers].CurrentMember.Caption Not Matches (\"(?i).*\\Qjeanne\\E.*\"))'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Set [*CJ_COL_AXIS] as 'Generate([*NATIVE_CJ_SET], {([Customers].currentMember)})'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = 'Standard', SOLVE_ORDER=400\n"
        + "Select\n"
        + "CrossJoin([*SORTED_COL_AXIS],[*BASE_MEMBERS_Measures]) on columns\n"
        + "From [Sales]";
    // Expect the native SQL (false = positive assertion).
    assertQuerySqlOrNot(getTestContext(), query, patterns, false, true, true);
    final Result result = executeQuery(query);
    final String resultString = TestContext.toString(result);
    // The negated filter must exclude every customer named Jeanne*.
    assertFalse(resultString.contains("Jeanne"));
    // Also verify native and non-native evaluation agree.
    verifySameNativeAndNot(query, null, getTestContext());
}
protected Result run() { getConnection().getCacheControl(null).flushSchemaCache(); IntegerProperty monLimit = MondrianProperties.instance().ResultLimit; int oldLimit = monLimit.get(); try { monLimit.set(this.resultLimit); Result result = executeQuery(query, con); // Check the number of positions on the last axis, which is // the ROWS axis in a 2 axis query. int numAxes = result.getAxes().length; Axis a = result.getAxes()[numAxes - 1]; final int positionCount = a.getPositions().size(); assertEquals(rowCount, positionCount); return result; } finally { monLimit.set(oldLimit); } }
/**
 * Creates a RolapNativeFilter, enabled or disabled according to the
 * EnableNativeFilter property as read at construction time.
 */
public RolapNativeFilter() {
    super.setEnabled(MondrianProperties.instance().EnableNativeFilter.get());
}
/**
 * Checks that a native count(filter(...)) under a two-member compound
 * slicer places the filter condition in the HAVING clause of the generated
 * SQL — both with and without aggregate tables — and that the resulting
 * cell value is correct.
 */
public void testNativeFilterWithCompoundSlicer_1() {
    propSaver.set(MondrianProperties.instance().GenerateFormattedSql, true);
    final String mdx =
        "with member [measures].[avgQtrs] as 'count(filter([Customers].[Name].Members, [Measures].[Unit Sales] > 0))' "
        + "select [measures].[avgQtrs] on 0 from sales where ( {[Product].[Drink].[Alcoholic Beverages].[Beer and Wine].[Beer], [Product].[Food].[Baked Goods].[Bread].[Muffins]} )";
    if (MondrianProperties.instance().EnableNativeFilter.get()
        && MondrianProperties.instance().EnableNativeNonEmpty.get())
    {
        boolean requiresOrderByAlias =
            TestContext.instance().getDialect().requiresOrderByAlias();
        // Two expected statements: against the raw fact table when
        // aggregate tables are off, or against agg_c_14_sales_fact_1997
        // when they are on.
        final String sqlMysql =
            propSaver.properties.UseAggregates.get() == false
                ? "select\n"
                  + " `customer`.`country` as `c0`,\n"
                  + " `customer`.`state_province` as `c1`,\n"
                  + " `customer`.`city` as `c2`,\n"
                  + " `customer`.`customer_id` as `c3`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c4`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c5`,\n"
                  + " `customer`.`gender` as `c6`,\n"
                  + " `customer`.`marital_status` as `c7`,\n"
                  + " `customer`.`education` as `c8`,\n"
                  + " `customer`.`yearly_income` as `c9`\n"
                  + "from\n"
                  + " `customer` as `customer`,\n"
                  + " `sales_fact_1997` as `sales_fact_1997`,\n"
                  + " `time_by_day` as `time_by_day`,\n"
                  + " `product_class` as `product_class`,\n"
                  + " `product` as `product`\n"
                  + "where\n"
                  + " `sales_fact_1997`.`customer_id` = `customer`.`customer_id`\n"
                  + "and\n"
                  + " `sales_fact_1997`.`time_id` = `time_by_day`.`time_id`\n"
                  + "and\n"
                  + " `time_by_day`.`the_year` = 1997\n"
                  + "and\n"
                  + " `sales_fact_1997`.`product_id` = `product`.`product_id`\n" //
                  + "and\n"
                  + " `product`.`product_class_id` = `product_class`.`product_class_id`\n"
                  + "and\n"
                  + " `product_class`.`product_family` in ('Drink', 'Food')\n"
                  + "and\n"
                  + " `product_class`.`product_department` in ('Alcoholic Beverages', 'Baked Goods')\n"
                  + "and\n"
                  + " `product_class`.`product_category` in ('Beer and Wine', 'Bread')\n"
                  + "and\n"
                  + " `product_class`.`product_subcategory` in ('Beer', 'Muffins')\n"
                  + "group by\n"
                  + " `customer`.`country`,\n"
                  + " `customer`.`state_province`,\n"
                  + " `customer`.`city`,\n"
                  + " `customer`.`customer_id`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`),\n"
                  + " `customer`.`gender`,\n"
                  + " `customer`.`marital_status`,\n"
                  + " `customer`.`education`,\n"
                  + " `customer`.`yearly_income`\n"
                  + "having\n"
                  + " (sum(`sales_fact_1997`.`unit_sales`) > 0)\n"
                  // ^^^^ This is what we are interested in. ^^^^
                  + "order by\n"
                  + (requiresOrderByAlias
                      ? " ISNULL(`c0`) ASC, `c0` ASC,\n"
                        + " ISNULL(`c1`) ASC, `c1` ASC,\n"
                        + " ISNULL(`c2`) ASC, `c2` ASC,\n"
                        + " ISNULL(`c4`) ASC, `c4` ASC"
                      : " ISNULL(`customer`.`country`) ASC, `customer`.`country` ASC,\n"
                        + " ISNULL(`customer`.`state_province`) ASC, `customer`.`state_province` ASC,\n"
                        + " ISNULL(`customer`.`city`) ASC, `customer`.`city` ASC,\n"
                        + " ISNULL(CONCAT(`customer`.`fname`, ' ', `customer`.`lname`)) ASC, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) ASC")
                : "select\n"
                  + " `customer`.`country` as `c0`,\n"
                  + " `customer`.`state_province` as `c1`,\n"
                  + " `customer`.`city` as `c2`,\n"
                  + " `customer`.`customer_id` as `c3`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c4`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c5`,\n"
                  + " `customer`.`gender` as `c6`,\n"
                  + " `customer`.`marital_status` as `c7`,\n"
                  + " `customer`.`education` as `c8`,\n"
                  + " `customer`.`yearly_income` as `c9`\n"
                  + "from\n"
                  + " `customer` as `customer`,\n"
                  + " `agg_c_14_sales_fact_1997` as `agg_c_14_sales_fact_1997`,\n"
                  + " `product_class` as `product_class`,\n"
                  + " `product` as `product`\n"
                  + "where\n"
                  + " `agg_c_14_sales_fact_1997`.`customer_id` = `customer`.`customer_id`\n"
                  + "and\n"
                  + " `agg_c_14_sales_fact_1997`.`the_year` = 1997\n"
                  + "and\n"
                  + " `agg_c_14_sales_fact_1997`.`product_id` = `product`.`product_id`\n"
                  + "and\n"
                  + " `product`.`product_class_id` = `product_class`.`product_class_id`\n"
                  + "and\n"
                  + " `product_class`.`product_family` in ('Drink', 'Food')\n"
                  + "and\n"
                  + " `product_class`.`product_department` in ('Alcoholic Beverages', 'Baked Goods')\n"
                  + "and\n"
                  + " `product_class`.`product_category` in ('Beer and Wine', 'Bread')\n"
                  + "and\n"
                  + " `product_class`.`product_subcategory` in ('Beer', 'Muffins')\n"
                  + "group by\n"
                  + " `customer`.`country`,\n"
                  + " `customer`.`state_province`,\n"
                  + " `customer`.`city`,\n"
                  + " `customer`.`customer_id`,\n"
                  + " CONCAT(`customer`.`fname`, ' ', `customer`.`lname`),\n"
                  + " `customer`.`gender`,\n"
                  + " `customer`.`marital_status`,\n"
                  + " `customer`.`education`,\n"
                  + " `customer`.`yearly_income`\n"
                  + "having\n"
                  + " (sum(`agg_c_14_sales_fact_1997`.`unit_sales`) > 0)\n"
                  // ^^^^ This is what we are interested in. ^^^^
                  + "order by\n"
                  + " ISNULL(`customer`.`country`) ASC, `customer`.`country` ASC,\n"
                  + " ISNULL(`customer`.`state_province`) ASC, `customer`.`state_province` ASC,\n"
                  + " ISNULL(`customer`.`city`) ASC, `customer`.`city` ASC,\n"
                  + " ISNULL(CONCAT(`customer`.`fname`, ' ', `customer`.`lname`)) ASC, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) ASC";
        final SqlPattern[] patterns = mysqlPattern(sqlMysql);
        // Make sure the tuples list is using the HAVING clause.
        assertQuerySqlOrNot(getTestContext(), mdx, patterns, false, true, true);
    }
    // Make sure the numbers are right
    assertQueryReturns(
        mdx,
        "Axis #0:\n"
        + "{[Product].[Drink].[Alcoholic Beverages].[Beer and Wine].[Beer]}\n"
        + "{[Product].[Food].[Baked Goods].[Bread].[Muffins]}\n"
        + "Axis #1:\n"
        + "{[Measures].[avgQtrs]}\n"
        + "Row #0: 1,281\n");
}
/**
 * Checks that a Matches() filter over customer names executes natively,
 * pushing the regular-expression test into the HAVING clause
 * (REGEXP_LIKE on Oracle, ~ on Postgres, REGEXP on MySQL), and that only
 * the matching customers are returned.
 *
 * @throws Exception on error
 */
public void testPositiveMatching() throws Exception {
    if (!MondrianProperties.instance().EnableNativeFilter.get()) {
        // No point testing these if the native filters
        // are turned off.
        return;
    }
    // Expected SQL, one statement per supported dialect.
    final String sqlOracle =
        "select \"customer\".\"country\" as \"c0\", \"customer\".\"state_province\" as \"c1\", \"customer\".\"city\" as \"c2\", \"customer\".\"customer_id\" as \"c3\", \"fname\" || ' ' || \"lname\" as \"c4\", \"fname\" || ' ' || \"lname\" as \"c5\", \"customer\".\"gender\" as \"c6\", \"customer\".\"marital_status\" as \"c7\", \"customer\".\"education\" as \"c8\", \"customer\".\"yearly_income\" as \"c9\" from \"customer\" \"customer\" group by \"customer\".\"country\", \"customer\".\"state_province\", \"customer\".\"city\", \"customer\".\"customer_id\", \"fname\" || ' ' || \"lname\", \"customer\".\"gender\", \"customer\".\"marital_status\", \"customer\".\"education\", \"customer\".\"yearly_income\" having REGEXP_LIKE(\"fname\" || ' ' || \"lname\", '.*jeanne.*', 'i') order by \"customer\".\"country\" ASC NULLS LAST, \"customer\".\"state_province\" ASC NULLS LAST, \"customer\".\"city\" ASC NULLS LAST, \"fname\" || ' ' || \"lname\" ASC NULLS LAST";
    final String sqlPgsql =
        "select \"customer\".\"country\" as \"c0\", \"customer\".\"state_province\" as \"c1\", \"customer\".\"city\" as \"c2\", \"customer\".\"customer_id\" as \"c3\", fullname as \"c4\", fullname as \"c5\", \"customer\".\"gender\" as \"c6\", \"customer\".\"marital_status\" as \"c7\", \"customer\".\"education\" as \"c8\", \"customer\".\"yearly_income\" as \"c9\" from \"customer\" as \"customer\" group by \"customer\".\"country\", \"customer\".\"state_province\", \"customer\".\"city\", \"customer\".\"customer_id\", fullname, \"customer\".\"gender\", \"customer\".\"marital_status\", \"customer\".\"education\", \"customer\".\"yearly_income\" having cast(fullname as text) ~ '(?i).*jeanne.*' order by \"customer\".\"country\" ASC NULLS LAST, \"customer\".\"state_province\" ASC NULLS LAST, \"customer\".\"city\" ASC NULLS LAST, fullname ASC NULLS LAST";
    final String sqlMysql =
        "select `customer`.`country` as `c0`, `customer`.`state_province` as `c1`, `customer`.`city` as `c2`, `customer`.`customer_id` as `c3`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c4`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) as `c5`, `customer`.`gender` as `c6`, `customer`.`marital_status` as `c7`, `customer`.`education` as `c8`, `customer`.`yearly_income` as `c9` from `customer` as `customer` group by `customer`.`country`, `customer`.`state_province`, `customer`.`city`, `customer`.`customer_id`, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`), `customer`.`gender`, `customer`.`marital_status`, `customer`.`education`, `customer`.`yearly_income` having UPPER(c5) REGEXP '.*JEANNE.*' order by "
        // Some dialect configurations order by the select aliases rather
        // than the underlying expressions.
        + (TestContext.instance().getDialect().requiresOrderByAlias()
            ? "ISNULL(`c0`) ASC, `c0` ASC, "
              + "ISNULL(`c1`) ASC, `c1` ASC, "
              + "ISNULL(`c2`) ASC, `c2` ASC, "
              + "ISNULL(`c4`) ASC, `c4` ASC"
            : "ISNULL(`customer`.`country`) ASC, `customer`.`country` ASC, ISNULL(`customer`.`state_province`) ASC, `customer`.`state_province` ASC, ISNULL(`customer`.`city`) ASC, `customer`.`city` ASC, ISNULL(CONCAT(`customer`.`fname`, ' ', `customer`.`lname`)) ASC, CONCAT(`customer`.`fname`, ' ', `customer`.`lname`) ASC");
    SqlPattern[] patterns = {
        new SqlPattern(Dialect.DatabaseProduct.ORACLE, sqlOracle, sqlOracle.length()),
        new SqlPattern(Dialect.DatabaseProduct.MYSQL, sqlMysql, sqlMysql.length()),
        new SqlPattern(Dialect.DatabaseProduct.POSTGRESQL, sqlPgsql, sqlPgsql.length())
    };
    final String queryResults =
        "Axis #0:\n"
        + "{}\n"
        + "Axis #1:\n"
        + "{[Customers].[USA].[WA].[Issaquah].[Jeanne Derry], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[CA].[Los Angeles].[Jeannette Eldridge], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[CA].[Burbank].[Jeanne Bohrnstedt], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[OR].[Portland].[Jeanne Zysko], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[WA].[Everett].[Jeanne McDill], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[CA].[West Covina].[Jeanne Whitaker], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[WA].[Everett].[Jeanne Turner], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[WA].[Puyallup].[Jeanne Wentz], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[OR].[Albany].[Jeannette Bura], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "{[Customers].[USA].[WA].[Lynnwood].[Jeanne Ibarra], [Measures].[*FORMATTED_MEASURE_0]}\n"
        + "Row #0: 50\n"
        + "Row #0: 21\n"
        + "Row #0: 31\n"
        + "Row #0: 42\n"
        + "Row #0: 110\n"
        + "Row #0: 59\n"
        + "Row #0: 42\n"
        + "Row #0: 157\n"
        + "Row #0: 146\n"
        + "Row #0: 78\n";
    final String query =
        "With\n"
        + "Set [*NATIVE_CJ_SET] as 'Filter([*BASE_MEMBERS_Customers], Not IsEmpty ([Measures].[Unit Sales]))'\n"
        + "Set [*SORTED_COL_AXIS] as 'Order([*CJ_COL_AXIS],[Customers].CurrentMember.OrderKey,BASC,Ancestor([Customers].CurrentMember,[Customers].[City]).OrderKey,BASC)'\n"
        + "Set [*BASE_MEMBERS_Customers] as 'Filter([Customers].[Name].Members,[Customers].CurrentMember.Caption Matches (\"(?i).*\\Qjeanne\\E.*\"))'\n"
        + "Set [*BASE_MEMBERS_Measures] as '{[Measures].[*FORMATTED_MEASURE_0]}'\n"
        + "Set [*CJ_COL_AXIS] as 'Generate([*NATIVE_CJ_SET], {([Customers].currentMember)})'\n"
        + "Member [Measures].[*FORMATTED_MEASURE_0] as '[Measures].[Unit Sales]', FORMAT_STRING = 'Standard', SOLVE_ORDER=400\n"
        + "Select\n"
        + "CrossJoin([*SORTED_COL_AXIS],[*BASE_MEMBERS_Measures]) on columns\n"
        + "From [Sales]";
    // Expect the native SQL (false = positive assertion).
    assertQuerySqlOrNot(getTestContext(), query, patterns, false, true, true);
    assertQueryReturns(query, queryResults);
    // Also verify native and non-native evaluation agree.
    verifySameNativeAndNot(query, null, getTestContext());
}
/**
 * Completes the read and returns the members as a list.
 *
 * <p>If the constraint's evaluator requires LIST result style, the whole
 * result set is loaded eagerly. Otherwise a lazy {@link AbstractList} is
 * returned that pulls tuples from {@code sqlTupleReader} on demand; when a
 * positive ResultLimit is configured, already-consumed leading elements
 * are evicted ({@code offset} tracks how many), and re-reading an evicted
 * index throws.
 */
public List<RolapMember> close() {
    // LIST result style requires the whole result to be materialized.
    final boolean asList =
        this.constraint.getEvaluator() != null
            && this.constraint.getEvaluator().getQuery().getResultStyle()
                == ResultStyle.LIST;
    final int limit = MondrianProperties.instance().ResultLimit.get();
    final List<RolapMember> l = new AbstractList<RolapMember>() {
        // True while the tuple reader may still produce rows.
        private boolean moreRows = true;
        // Number of leading elements evicted from the backing list.
        private int offset = 0;
        // Cached element 0, kept even after eviction.
        private RolapMember first = null;
        private boolean firstMemberAssigned = false;

        /** Performs a load of the whole result set. */
        public int size() {
            while (this.moreRows) {
                this.moreRows = sqlTupleReader.readNextTuple();
                if (limit > 0 && !asList && getList().size() > limit) {
                    // NOTE(review): leftover debug output — prints to
                    // stdout and dumps a stack trace when the streaming
                    // list outgrows the limit; consider removing or
                    // routing through a logger.
                    System.out.println("Target: 199, Ouch! Toooo big array..." + this.hashCode());
                    new Throwable().printStackTrace();
                }
            }
            return getList().size();
        }

        public RolapMember get(final int idx) {
            if (asList) {
                return getList().get(idx);
            }
            if (idx == 0 && this.firstMemberAssigned) {
                return this.first;
            }
            // Translate the external index into the backing list, which
            // may have had its head evicted.
            int index = idx - offset;
            if (0 < limit && index < 0) {
                // Cannot send NoSuchElementException since its intercepted
                // by AbstractSequentialList to identify out of bounds.
                throw new RuntimeException("Element " + idx + " has been forgotten");
            }
            // Read forward until the requested index is available or the
            // reader is exhausted, evicting the head to honor the limit.
            while (index >= getList().size() && this.moreRows) {
                this.moreRows = sqlTupleReader.readNextTuple();
                if (limit > 0 && getList().size() > limit) {
                    while (getList().size() > limit) {
                        index--;
                        offset++;
                        ((LinkedList) getList()).removeFirst();
                    }
                }
            }
            if (idx == 0) {
                this.first = getList().get(index);
                // Above might run into exception which is caught in
                // isEmpty(). So can change the state of the object after
                // that.
                this.firstMemberAssigned = true;
                return this.first;
            } else {
                return getList().get(index);
            }
        }

        public RolapMember set(final int i, final RolapMember e) {
            if (asList) {
                return getList().set(i, e);
            } else {
                // The streaming variant is read-only.
                throw new UnsupportedOperationException();
            }
        }

        public boolean isEmpty() {
            // Probe for element 0; absence manifests as an index error.
            try {
                get(0);
                return false;
            } catch (IndexOutOfBoundsException e) {
                return true;
            }
        }

        public int hashCode() {
            return Target.this.hashCode();
        }

        public Iterator<RolapMember> iterator() {
            return new Iterator<RolapMember>() {
                private int cursor = 0;

                public boolean hasNext() {
                    // hasNext is answered by attempting the next get().
                    try {
                        get(cursor);
                        return true;
                    } catch (IndexOutOfBoundsException ioobe) {
                        return false;
                    }
                }

                public RolapMember next() {
                    return get(cursor++);
                }

                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
    if (asList) {
        // Eagerly drain the reader for LIST result style.
        l.size();
    }
    return l;
}
/**
 * Runs a query twice, with native crossjoin optimization enabled and
 * disabled. If both results are equal, and both agree with the expected
 * result, it is considered correct.
 *
 * <p>Optionally the query can be run with fresh connection. This is useful
 * if the test case sets its certain mondrian properties, e.g. native
 * properties like: mondrian.native.filter.enable
 *
 * @param resultLimit Maximum result size of all the MDX operations in this
 *     query. This might be hard to estimate as it is usually larger than
 *     the rowCount of the final result. Setting it to 0 will cause this
 *     limit to be ignored.
 * @param rowCount Number of rows returned. (That is, the number of
 *     positions on the last axis of the query.)
 * @param mdx Query
 * @param expectedResult Expected result string
 * @param freshConnection Whether fresh connection is required
 */
protected void checkNative(
    int resultLimit,
    int rowCount,
    String mdx,
    String expectedResult,
    boolean freshConnection)
{
    // Don't run the test if we're testing expression dependencies.
    // Expression dependencies cause spurious internal calls to
    // 'level.getMembers()' which create false negatives in this test.
    if (MondrianProperties.instance().TestExpDependencies.get() > 0) {
        return;
    }
    getConnection().getCacheControl(null).flushSchemaCache();
    try {
        // --- Pass 1: native evaluation enabled ---
        Logger.getLogger(getClass()).debug("*** Native: " + mdx);
        boolean reuseConnection = !freshConnection;
        Connection con =
            getTestContext().withSchemaPool(reuseConnection).getConnection();
        RolapNativeRegistry reg = getRegistry(con);
        // Hard cache keeps native results alive so the cached re-run
        // check below is meaningful.
        reg.useHardCache(true);
        TestListener listener = new TestListener();
        reg.setListener(listener);
        reg.setEnabled(true);
        TestCase c = new TestCase(con, resultLimit, rowCount, mdx);
        Result result = c.run();
        String nativeResult = TestContext.toString(result);
        if (!listener.isFoundEvaluator()) {
            fail("expected native execution of " + mdx);
        }
        if (!listener.isExecuteSql()) {
            fail("cache is empty: expected SQL query to be executed");
        }
        if (MondrianProperties.instance().EnableRolapCubeMemberCache.get()) {
            // run once more to make sure that the result comes from cache
            // now
            listener.setExecuteSql(false);
            c.run();
            if (listener.isExecuteSql()) {
                fail("expected result from cache when query runs twice");
            }
        }
        con.close();
        // --- Pass 2: native evaluation disabled (interpreter) ---
        Logger.getLogger(getClass()).debug("*** Interpreter: " + mdx);
        getConnection().getCacheControl(null).flushSchemaCache();
        con = getTestContext().withSchemaPool(false).getConnection();
        reg = getRegistry(con);
        listener.setFoundEvaluator(false);
        reg.setListener(listener);
        // disable RolapNativeSet
        reg.setEnabled(false);
        result = executeQuery(mdx, con);
        String interpretedResult = TestContext.toString(result);
        if (listener.isFoundEvaluator()) {
            fail("did not expect native executions of " + mdx);
        }
        // Compare both results against the expectation, then against each
        // other.
        if (expectedResult != null) {
            TestContext.assertEqualsVerbose(
                expectedResult,
                nativeResult,
                false,
                "Native implementation returned different result than "
                + "expected; MDX=" + mdx);
            TestContext.assertEqualsVerbose(
                expectedResult,
                interpretedResult,
                false,
                "Interpreter implementation returned different result than "
                + "expected; MDX=" + mdx);
        }
        if (!nativeResult.equals(interpretedResult)) {
            TestContext.assertEqualsVerbose(
                interpretedResult,
                nativeResult,
                false,
                "Native implementation returned different result than "
                + "interpreter; MDX=" + mdx);
        }
    } finally {
        // Restore registry defaults even if an assertion failed above.
        Connection con = getConnection();
        RolapNativeRegistry reg = getRegistry(con);
        reg.setEnabled(true);
        reg.useHardCache(false);
    }
}
/**
 * During MDX query parse and execution, checks that the query results (or
 * does not result) in a particular SQL statement being generated.
 *
 * <p>Parses and executes the MDX query once for each SQL pattern in the
 * current dialect. If there are multiple patterns, runs the MDX query
 * multiple times, and expects to see each SQL statement appear. If there
 * are no patterns in this dialect, the test trivially succeeds.
 *
 * @param testContext non-default test context if required
 * @param mdxQuery MDX query
 * @param patterns Set of patterns
 * @param negative false to assert if SQL is generated; true to assert if
 *     SQL is NOT generated
 * @param bypassSchemaCache whether to grab a new connection and bypass the
 *     schema cache before parsing the MDX query
 * @param clearCache whether to clear cache before executing the MDX query
 */
protected void assertQuerySqlOrNot(
    TestContext testContext,
    String mdxQuery,
    SqlPattern[] patterns,
    boolean negative,
    boolean bypassSchemaCache,
    boolean clearCache)
{
    Connection connection = testContext.getConnection();
    mdxQuery = testContext.upgradeQuery(mdxQuery);
    // Run the test once for each pattern in this dialect.
    // (We could optimize and run it once, collecting multiple queries, and
    // comparing all queries at the end.)
    Dialect dialect = testContext.getDialect();
    Dialect.DatabaseProduct d = dialect.getDatabaseProduct();
    boolean patternFound = false;
    for (SqlPattern sqlPattern : patterns) {
        if (!sqlPattern.hasDatabaseProduct(d)) {
            // If the dialect is not one in the pattern set, skip the
            // test. If in the end no pattern is located, print a warning
            // message if required.
            continue;
        }
        patternFound = true;
        String sql = sqlPattern.getSql();
        String trigger = sqlPattern.getTriggerSql();
        // Adapt generic pattern text to the current dialect.
        sql = dialectize(d, sql);
        trigger = dialectize(d, trigger);
        // Create a dummy DataSource which will throw a 'bomb' if it is
        // asked to execute a particular SQL statement, but will otherwise
        // behave exactly the same as the current DataSource.
        RolapUtil.setHook(new TriggerHook(trigger));
        Bomb bomb = null;
        try {
            if (bypassSchemaCache) {
                connection = testContext.withSchemaPool(false).getConnection();
            }
            final Query query = connection.parseQuery(mdxQuery);
            if (clearCache) {
                clearCache((RolapCube) query.getCube());
            }
            final Result result = connection.execute(query);
            Util.discard(result);
            bomb = null;
        } catch (Bomb e) {
            bomb = e;
        } catch (RuntimeException e) {
            // Walk up the exception tree and see if the root cause
            // was a SQL bomb.
            bomb = Util.getMatchingCause(e, Bomb.class);
            if (bomb == null) {
                throw e;
            }
        } finally {
            // Always uninstall the hook, even on failure.
            RolapUtil.setHook(null);
        }
        if (negative) {
            // A detonated bomb means the forbidden SQL was generated.
            if (bomb != null) {
                fail("forbidden query [" + sql + "] detected");
            }
        } else {
            if (bomb == null) {
                fail("expected query [" + sql + "] did not occur");
            }
            // Normalize line endings and quote style before comparing.
            assertEquals(
                replaceQuotes(sql.replaceAll("\r\n", "\n")),
                replaceQuotes(bomb.sql.replaceAll("\r\n", "\n")));
        }
    }
    // Print warning message that no pattern was specified for the current
    // dialect.
    if (!patternFound) {
        String warnDialect =
            MondrianProperties.instance().WarnIfNoPatternForDialect.get();
        if (warnDialect.equals(d.toString())) {
            System.out.println(
                "[No expected SQL statements found for dialect \""
                + dialect.toString()
                + "\" and test not run]");
        }
    }
}
/** * Checks that a given sequence of cell requests results in a particular SQL statement being * generated. * * <p>Always clears the cache before running the requests. * * <p>Runs the requests once for each SQL pattern in the current dialect. If there are multiple * patterns, runs the MDX query multiple times, and expects to see each SQL statement appear. If * there are no patterns in this dialect, the test trivially succeeds. * * @param requests Sequence of cell requests * @param patterns Set of patterns * @param negative Set to false in order to 'expect' a query or true to 'forbid' a query. */ protected void assertRequestSql(CellRequest[] requests, SqlPattern[] patterns, boolean negative) { final RolapStar star = requests[0].getMeasure().getStar(); final String cubeName = requests[0].getMeasure().getCubeName(); final RolapCube cube = lookupCube(cubeName); final Dialect sqlDialect = star.getSqlQueryDialect(); Dialect.DatabaseProduct d = sqlDialect.getDatabaseProduct(); SqlPattern sqlPattern = SqlPattern.getPattern(d, patterns); if (d == Dialect.DatabaseProduct.UNKNOWN) { // If the dialect is not one in the pattern set, do not run the // test. We do not print any warning message. return; } boolean patternFound = false; for (SqlPattern pattern : patterns) { if (!pattern.hasDatabaseProduct(d)) { continue; } patternFound = true; clearCache(cube); String sql = sqlPattern.getSql(); String trigger = sqlPattern.getTriggerSql(); switch (d) { case ORACLE: sql = sql.replaceAll(" =as= ", " "); trigger = trigger.replaceAll(" =as= ", " "); break; case TERADATA: sql = sql.replaceAll(" =as= ", " as "); trigger = trigger.replaceAll(" =as= ", " as "); break; } // Create a dummy DataSource which will throw a 'bomb' if it is // asked to execute a particular SQL statement, but will otherwise // behave exactly the same as the current DataSource. 
RolapUtil.setHook(new TriggerHook(trigger)); Bomb bomb; final Execution execution = new Execution(((RolapConnection) getConnection()).getInternalStatement(), 1000); final AggregationManager aggMgr = execution .getMondrianStatement() .getMondrianConnection() .getServer() .getAggregationManager(); final Locus locus = new Locus(execution, "BatchTestCase", "BatchTestCase"); try { FastBatchingCellReader fbcr = new FastBatchingCellReader(execution, getCube(cubeName), aggMgr); for (CellRequest request : requests) { fbcr.recordCellRequest(request); } // The FBCR will presume there is a current Locus in the stack, // so let's create a mock one. Locus.push(locus); fbcr.loadAggregations(); bomb = null; } catch (Bomb e) { bomb = e; } finally { RolapUtil.setHook(null); Locus.pop(locus); } if (!negative && bomb == null) { fail("expected query [" + sql + "] did not occur"); } else if (negative && bomb != null) { fail("forbidden query [" + sql + "] detected"); } TestContext.assertEqualsVerbose(replaceQuotes(sql), replaceQuotes(bomb.sql)); } // Print warning message that no pattern was specified for the current // dialect. if (!patternFound) { String warnDialect = MondrianProperties.instance().WarnIfNoPatternForDialect.get(); if (warnDialect.equals(d.toString())) { System.out.println( "[No expected SQL statements found for dialect \"" + sqlDialect.toString() + "\" and test not run]"); } } }
/** * Active object that maintains the "global cache" (in JVM, but shared between connections using a * particular schema) and "external cache" (as implemented by a {@link mondrian.spi.SegmentCache}. * * <p>Segment states * * <table> * <tr><th>State</th><th>Meaning</th></tr> * <tr><td>Local</td><td>Initial state of a segment</td></tr> * </table> * * <h2>Decisions to be reviewed</h2> * * <p>1. Create variant of actor that processes all requests synchronously, and does not need a * thread. This would be a more 'embedded' mode of operation (albeit with worse scale-out). * * <p>2. Move functionality into AggregationManager? * * <p>3. Delete {@link mondrian.rolap.RolapStar#lookupOrCreateAggregation} and {@link * mondrian.rolap.RolapStar#lookupSegment} and {@link * mondrian.rolap.RolapStar}.lookupAggregationShared (formerly RolapStar.lookupAggregation). * * <h2>Moved methods</h2> * * <p>(Keeping track of where methods came from will make it easier to merge to the mondrian-4 code * line.) * * <p>1. {@link mondrian.rolap.RolapStar#getCellFromCache} moved from {@link * Aggregation}.getCellValue * * <h2>Done</h2> * * <p>1. Obsolete CountingAggregationManager, and property * mondrian.rolap.agg.enableCacheHitCounters. * * <p>2. AggregationManager becomes non-singleton. * * <p>3. SegmentCacheWorker methods and segmentCache field become non-static. initCache() is called * on construction. SegmentCache is passed into constructor (therefore move ServiceDiscovery into * client). AggregationManager (or maybe MondrianServer) is another constructor parameter. * * <p>5. Move SegmentHeader, SegmentBody, ConstrainedColumn into mondrian.spi. Leave behind * dependencies on mondrian.rolap.agg. In particular, put code that converts Segment + * SegmentWithData to and from SegmentHeader + SegmentBody (e.g. {@link SegmentHeader}#forSegment) * into a utility class. (Do this as CLEANUP, after functionality is complete?) * * <p>6. Move functionality Aggregation to Segment. 
Long-term, Aggregation should not be used as a * 'gatekeeper' to Segment. Remove Aggregation fields columns and axes. * * <p>9. Obsolete {@link RolapStar#cacheAggregations}. Similar effect will be achieved by removing * the 'jvm cache' from the chain of caches. * * <p>10. Rename Aggregation.Axis to SegmentAxis. * * <p>11. Remove Segment.setData and instead split out subclass SegmentWithData. Now segment is * immutable. You don't have to wait for its state to change. You wait for a * Future<SegmentWithData> to become ready. * * <p>12. Remove methods: RolapCube.checkAggregateModifications, * RolapStar.checkAggregateModifications, RolapSchema.checkAggregateModifications, * RolapStar.pushAggregateModificationsToGlobalCache, * RolapSchema.pushAggregateModificationsToGlobalCache, * RolapCube.pushAggregateModificationsToGlobalCache. * * <p>13. Add new implementations of Future: CompletedFuture and SlotFuture. * * <p>14. Remove methods: * * <p> * * <ul> * <li>Remove {@link SegmentLoader}.loadSegmentsFromCache - creates a {@link SegmentHeader} that * has PRECISELY same specification as the requested segment, very unlikely to have a hit * <li>Remove {@link SegmentLoader}.loadSegmentFromCacheRollup * <li>Break up {@link SegmentLoader}.cacheSegmentData, and place code that is called after a * segment has arrived * </ul> * * <p>13. Fix flush. Obsolete {@link Aggregation}.flush, and {@link RolapStar}.flush, which called * it. * * <p>18. {@code SegmentCacheManager#locateHeaderBody} (and maybe other methods) call {@link * SegmentCacheWorker#get}, and that's a slow blocking call. Make waits for segment futures should * be called from a worker or client, not an agent. * * <h2>Ideas and tasks</h2> * * <p>7. RolapStar.localAggregations and .sharedAggregations. Obsolete sharedAggregations. * * <p>8. Longer term. Move {@link mondrian.rolap.RolapStar.Bar}.segmentRefs to {@link * mondrian.server.Execution}. Would it still be thread-local? * * <p>10. 
Call {@link mondrian.spi.DataSourceChangeListener#isAggregationChanged}. Previously called * from {@link RolapStar}.checkAggregateModifications, now never called. * * <p>12. We can quickly identify segments affected by a flush using {@link * SegmentCacheIndex#intersectRegion}. But then what? Options: * * <ol> * <li>Option #1. Pull them in, trim them, write them out? But: causes a lot of I/O, and we may * never use these segments. Easiest. * <li>Option #2. Mark the segments in the index as needing to be trimmed; trim them when read, * and write out again. But: doesn't propagate to other nodes. * <li>Option #3. (Best?) Write a mapping SegmentHeader->Restrictions into the cache. Less I/O * than #1. Method "SegmentCache.addRestriction(SegmentHeader, CacheRegion)" * </ol> * * <p>14. Move {@link AggregationManager#getCellFromCache} somewhere else. It's concerned with local * segments, not the global/external cache. * * <p>15. Method to convert SegmentHeader + SegmentBody to Segment + SegmentWithData is imperfect. * Cannot parse predicates, compound predicates. Need mapping in star to do it properly and * efficiently? {@link mondrian.rolap.agg.SegmentBuilder.SegmentConverter} is a hack that can be * removed when this is fixed. See {@link SegmentBuilder#toSegment}. Also see #20. * * <p>17. Revisit the strategy for finding segments that can be copied from global and external * cache into local cache. The strategy of sending N {@link CellRequest}s at a time, then executing * SQL to fill in the gaps, is flawed. We need to maximize N in order to reduce segment * fragmentation, but if too high, we blow memory. BasicQueryTest.testAnalysis is an example of * this. Instead, we should send cell-requests in batches (is ~1000 the right size?), identify those * that can be answered from global or external cache, return those segments, but not execute SQL * until the end of the phase. If so, {@link CellRequestQuantumExceededException} be obsoleted. * * <p>19. Tracing. a. 
Remove or re-purpose {@link FastBatchingCellReader#pendingCount}; b. Add * counter to measure requests satisfied by calling {@link * mondrian.rolap.agg.SegmentCacheManager#peek}. * * <p>20. Obsolete {@link SegmentDataset} and its implementing classes. {@link SegmentWithData} can * use {@link SegmentBody} instead. Will save copying. * * <p>21. Obsolete {@link mondrian.util.CombiningGenerator}. * * <p>22. {@link SegmentHeader#constrain(mondrian.spi.SegmentColumn[])} is broken for N-dimensional * regions where N > 1. Each call currently creates N more 1-dimensional regions, but should * create 1 more N-dimensional region. {@link SegmentHeader#excludedRegions} should be a list of * {@link SegmentColumn} arrays. * * <p>23. All code that calls {@link Future#get} should probably handle {@link * CancellationException}. * * <p>24. Obsolete {@link #handler}. Indirection doesn't win anything. * * @author jhyde */ public class SegmentCacheManager { private final Handler handler = new Handler(); private final Actor ACTOR; public final Thread thread; /** Executor with which to send requests to external caches. */ public final ExecutorService cacheExecutor = Util.getExecutorService( MondrianProperties.instance().SegmentCacheManagerNumberCacheThreads.get(), 0, 1, "mondrian.rolap.agg.SegmentCacheManager$cacheExecutor", new RejectedExecutionHandler() { public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { throw MondrianResource.instance().SegmentCacheLimitReached.ex(); } }); /** * Executor with which to execute SQL requests. * * <p>TODO: create using factory and/or configuration parameters. Executor should be shared within * MondrianServer or target JDBC database. 
*/ public final ExecutorService sqlExecutor = Util.getExecutorService( MondrianProperties.instance().SegmentCacheManagerNumberSqlThreads.get(), 0, 1, "mondrian.rolap.agg.SegmentCacheManager$sqlExecutor", new RejectedExecutionHandler() { public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { throw MondrianResource.instance().SqlQueryLimitReached.ex(); } }); // NOTE: This list is only mutable for testing purposes. Would rather it // were immutable. public final List<SegmentCacheWorker> segmentCacheWorkers = new CopyOnWriteArrayList<SegmentCacheWorker>(); public final SegmentCache compositeCache; private final SegmentCacheIndexRegistry indexRegistry; private static final Logger LOGGER = Logger.getLogger(AggregationManager.class); private final MondrianServer server; public SegmentCacheManager(MondrianServer server) { this.server = server; ACTOR = new Actor(); thread = new Thread(ACTOR, "mondrian.rolap.agg.SegmentCacheManager$ACTOR"); thread.setDaemon(true); thread.start(); // Create the index registry. this.indexRegistry = new SegmentCacheIndexRegistry(); // Add a local cache, if needed. if (!MondrianProperties.instance().DisableCaching.get()) { final MemorySegmentCache cache = new MemorySegmentCache(); segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread)); } // Add an external cache, if configured. final List<SegmentCache> externalCache = SegmentCacheWorker.initCache(); for (SegmentCache cache : externalCache) { // Create a worker for this external cache segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread)); // Hook up a listener so it can update // the segment index. cache.addListener(new AsyncCacheListener(this, server)); } compositeCache = new CompositeSegmentCache(segmentCacheWorkers); } public <T> T execute(Command<T> command) { return ACTOR.execute(handler, command); } public SegmentCacheIndexRegistry getIndexRegistry() { return indexRegistry; } /** * Adds a segment to segment index. 
* * <p>Called when a SQL statement has finished loading a segment. * * <p>Does not add the segment to the external cache. That is a potentially long-duration * operation, better carried out by a worker. * * @param header segment header * @param body segment body */ public void loadSucceeded(RolapStar star, SegmentHeader header, SegmentBody body) { final Locus locus = Locus.peek(); ACTOR.event( handler, new SegmentLoadSucceededEvent( System.currentTimeMillis(), locus.getServer().getMonitor(), locus.getServer().getId(), locus.execution.getMondrianStatement().getMondrianConnection().getId(), locus.execution.getMondrianStatement().getId(), locus.execution.getId(), star, header, body)); } /** * Informs cache manager that a segment load failed. * * <p>Called when a SQL statement receives an error while loading a segment. * * @param header segment header * @param throwable Error */ public void loadFailed(RolapStar star, SegmentHeader header, Throwable throwable) { final Locus locus = Locus.peek(); ACTOR.event( handler, new SegmentLoadFailedEvent( System.currentTimeMillis(), locus.getServer().getMonitor(), locus.getServer().getId(), locus.execution.getMondrianStatement().getMondrianConnection().getId(), locus.execution.getMondrianStatement().getId(), locus.execution.getId(), star, header, throwable)); } /** * Removes a segment from segment index. * * <p>Call is asynchronous. It comes back immediately. * * <p>Does not remove it from the external cache. * * @param header segment header */ public void remove(RolapStar star, SegmentHeader header) { final Locus locus = Locus.peek(); ACTOR.event( handler, new SegmentRemoveEvent( System.currentTimeMillis(), locus.getServer().getMonitor(), locus.getServer().getId(), locus.execution.getMondrianStatement().getMondrianConnection().getId(), locus.execution.getMondrianStatement().getId(), locus.execution.getId(), this, star, header)); } /** Tells the cache that a segment is newly available in an external cache. 
*/ public void externalSegmentCreated(SegmentHeader header, MondrianServer server) { ACTOR.event( handler, new ExternalSegmentCreatedEvent( System.currentTimeMillis(), server.getMonitor(), server.getId(), 0, 0, 0, this, header)); } /** Tells the cache that a segment is no longer available in an external cache. */ public void externalSegmentDeleted(SegmentHeader header, MondrianServer server) { ACTOR.event( handler, new ExternalSegmentDeletedEvent( System.currentTimeMillis(), server.getMonitor(), server.getId(), 0, 0, 0, this, header)); } public void printCacheState(CellRegion region, PrintWriter pw, Locus locus) { ACTOR.execute(handler, new PrintCacheStateCommand(region, pw, locus)); } /** Shuts down this cache manager and all active threads and indexes. */ public void shutdown() { execute(new ShutdownCommand()); cacheExecutor.shutdown(); sqlExecutor.shutdown(); } public SegmentBuilder.SegmentConverter getConverter(RolapStar star, SegmentHeader header) { return indexRegistry .getIndex(star) .getConverter( header.schemaName, header.schemaChecksum, header.cubeName, header.rolapStarFactTableName, header.measureName, header.compoundPredicates); } /** * Makes a quick request to the aggregation manager to see whether the cell value required by a * particular cell request is in external cache. * * <p>'Quick' is relative. It is an asynchronous request (due to the aggregation manager being an * actor) and therefore somewhat slow. If the segment is in cache, will save batching up future * requests and re-executing the query. Win should be particularly noticeable for queries running * on a populated cache. Without this feature, every query would require at least two iterations. * * <p>Request does not issue SQL to populate the segment. Nor does it try to find existing * segments for rollup. Those operations can wait until next phase. * * <p>Client is responsible for adding the segment to its private cache. 
* * @param request Cell request * @return Segment with data, or null if not in cache */ public SegmentWithData peek(final CellRequest request) { final SegmentCacheManager.PeekResponse response = execute(new PeekCommand(request, Locus.peek())); for (SegmentHeader header : response.headerMap.keySet()) { final SegmentBody body = compositeCache.get(header); if (body != null) { final SegmentBuilder.SegmentConverter converter = response.converterMap.get(SegmentCacheIndexImpl.makeConverterKey(header)); if (converter != null) { return converter.convert(header, body); } } } for (Map.Entry<SegmentHeader, Future<SegmentBody>> entry : response.headerMap.entrySet()) { final Future<SegmentBody> bodyFuture = entry.getValue(); if (bodyFuture != null) { final SegmentBody body = Util.safeGet(bodyFuture, "Waiting for segment to load"); final SegmentHeader header = entry.getKey(); final SegmentBuilder.SegmentConverter converter = response.converterMap.get(SegmentCacheIndexImpl.makeConverterKey(header)); if (converter != null) { return converter.convert(header, body); } } } return null; } /** Visitor for messages (commands and events). */ public interface Visitor { void visit(SegmentLoadSucceededEvent event); void visit(SegmentLoadFailedEvent event); void visit(SegmentRemoveEvent event); void visit(ExternalSegmentCreatedEvent event); void visit(ExternalSegmentDeletedEvent event); } private class Handler implements Visitor { public void visit(SegmentLoadSucceededEvent event) { indexRegistry.getIndex(event.star).loadSucceeded(event.header, event.body); event.monitor.sendEvent( new CellCacheSegmentCreateEvent( event.timestamp, event.serverId, event.connectionId, event.statementId, event.executionId, event.header.getConstrainedColumns().size(), event.body == null ? 
0 : event.body.getValueMap().size(), CellCacheSegmentCreateEvent.Source.SQL)); } public void visit(SegmentLoadFailedEvent event) { indexRegistry.getIndex(event.star).loadFailed(event.header, event.throwable); } public void visit(final SegmentRemoveEvent event) { indexRegistry.getIndex(event.star).remove(event.header); event.monitor.sendEvent( new CellCacheSegmentDeleteEvent( event.timestamp, event.serverId, event.connectionId, event.statementId, event.executionId, event.header.getConstrainedColumns().size(), CellCacheEvent.Source.CACHE_CONTROL)); // Remove the segment from external caches. Use an executor, because // it may take some time. We discard the future, because we don't // care too much if it fails. final Future<?> future = event.cacheMgr.cacheExecutor.submit( new Runnable() { public void run() { try { // Note that the SegmentCache API doesn't require // us to verify that the segment exists (by calling // "contains") before we call "remove". event.cacheMgr.compositeCache.remove(event.header); } catch (Throwable e) { LOGGER.warn("remove header failed: " + event.header, e); } } }); Util.safeGet(future, "SegmentCacheManager.segmentremoved"); } public void visit(ExternalSegmentCreatedEvent event) { final SegmentCacheIndex index = event.cacheMgr.indexRegistry.getIndex(event.header); if (index != null) { index.add(event.header, false, null); event.monitor.sendEvent( new CellCacheSegmentCreateEvent( event.timestamp, event.serverId, event.connectionId, event.statementId, event.executionId, event.header.getConstrainedColumns().size(), 0, CellCacheEvent.Source.EXTERNAL)); } } public void visit(ExternalSegmentDeletedEvent event) { final SegmentCacheIndex index = event.cacheMgr.indexRegistry.getIndex(event.header); if (index != null) { index.remove(event.header); event.monitor.sendEvent( new CellCacheSegmentDeleteEvent( event.timestamp, event.serverId, event.connectionId, event.statementId, event.executionId, event.header.getConstrainedColumns().size(), 
CellCacheEvent.Source.EXTERNAL)); } } } interface Message {} public static interface Command<T> extends Message, Callable<T> { Locus getLocus(); } /** Command to flush a particular region from cache. */ public static final class FlushCommand implements Command<FlushResult> { private final CellRegion region; private final CacheControlImpl cacheControlImpl; private final Locus locus; private final SegmentCacheManager cacheMgr; public FlushCommand( Locus locus, SegmentCacheManager mgr, CellRegion region, CacheControlImpl cacheControlImpl) { this.locus = locus; this.cacheMgr = mgr; this.region = region; this.cacheControlImpl = cacheControlImpl; } public Locus getLocus() { return locus; } public FlushResult call() throws Exception { // For each measure and each star, ask the index // which headers intersect. final List<SegmentHeader> headers = new ArrayList<SegmentHeader>(); final List<Member> measures = CacheControlImpl.findMeasures(region); final SegmentColumn[] flushRegion = CacheControlImpl.findAxisValues(region); final List<RolapStar> starList = CacheControlImpl.getStarList(region); for (Member member : measures) { if (!(member instanceof RolapStoredMeasure)) { continue; } final RolapStoredMeasure storedMeasure = (RolapStoredMeasure) member; final RolapStar star = storedMeasure.getCube().getStar(); final SegmentCacheIndex index = cacheMgr.indexRegistry.getIndex(star); headers.addAll( index.intersectRegion( member.getDimension().getSchema().getName(), ((RolapSchema) member.getDimension().getSchema()).getChecksum(), storedMeasure.getCube().getName(), storedMeasure.getName(), storedMeasure.getCube().getStar().getFactTable().getAlias(), flushRegion)); } // If flushRegion is empty, this means we must clear all // segments for the region's measures. if (flushRegion.length == 0) { for (final SegmentHeader header : headers) { for (RolapStar star : starList) { cacheMgr.indexRegistry.getIndex(star).remove(header); } // Remove the segment from external caches. 
Use an // executor, because it may take some time. We discard // the future, because we don't care too much if it fails. cacheControlImpl.trace( "discard segment - it cannot be constrained and maintain consistency:\n" + header.getDescription()); final Future<?> task = cacheMgr.cacheExecutor.submit( new Runnable() { public void run() { try { // Note that the SegmentCache API doesn't // require us to verify that the segment // exists (by calling "contains") before we // call "remove". cacheMgr.compositeCache.remove(header); } catch (Throwable e) { LOGGER.warn("remove header failed: " + header, e); } } }); Util.safeGet(task, "SegmentCacheManager.flush"); } return new FlushResult(Collections.<Callable<Boolean>>emptyList()); } // Now we know which headers intersect. For each of them, // we append an excluded region. // // TODO: Optimize the logic here. If a segment is mostly // empty, we should trash it completely. final List<Callable<Boolean>> callableList = new ArrayList<Callable<Boolean>>(); for (final SegmentHeader header : headers) { if (!header.canConstrain(flushRegion)) { // We have to delete that segment altogether. cacheControlImpl.trace( "discard segment - it cannot be constrained and maintain consistency:\n" + header.getDescription()); for (RolapStar star : starList) { cacheMgr.indexRegistry.getIndex(star).remove(header); } continue; } final SegmentHeader newHeader = header.constrain(flushRegion); for (final SegmentCacheWorker worker : cacheMgr.segmentCacheWorkers) { callableList.add( new Callable<Boolean>() { public Boolean call() throws Exception { boolean existed; if (worker.supportsRichIndex()) { final SegmentBody sb = worker.get(header); existed = worker.remove(header); if (sb != null) { worker.put(newHeader, sb); } } else { // The cache doesn't support rich index. We // have to clear the segment entirely. 
existed = worker.remove(header); } return existed; } }); } for (RolapStar star : starList) { SegmentCacheIndex index = cacheMgr.indexRegistry.getIndex(star); index.remove(header); index.add(newHeader, false, null); } } // Done return new FlushResult(callableList); } } private class PrintCacheStateCommand implements SegmentCacheManager.Command<Void> { private final PrintWriter pw; private final Locus locus; private final CellRegion region; public PrintCacheStateCommand(CellRegion region, PrintWriter pw, Locus locus) { this.region = region; this.pw = pw; this.locus = locus; } public Void call() { final List<RolapStar> starList = CacheControlImpl.getStarList(region); Collections.sort( starList, new Comparator<RolapStar>() { public int compare(RolapStar o1, RolapStar o2) { return o1.getFactTable().getAlias().compareTo(o2.getFactTable().getAlias()); } }); for (RolapStar star : starList) { indexRegistry.getIndex(star).printCacheState(pw); } return null; } public Locus getLocus() { return locus; } } /** * Result of a {@link FlushCommand}. Contains a list of tasks that must be executed by the caller * (or by an executor) to flush segments from the external cache(s). */ public static class FlushResult { public final List<Callable<Boolean>> tasks; public FlushResult(List<Callable<Boolean>> tasks) { this.tasks = tasks; } } /** Special exception, thrown only by {@link ShutdownCommand}, telling the actor to shut down. */ private static class PleaseShutdownException extends RuntimeException { private PleaseShutdownException() {} } private static class ShutdownCommand implements Command<String> { public ShutdownCommand() {} public String call() throws Exception { throw new PleaseShutdownException(); } public Locus getLocus() { return null; } } private abstract static class Event implements Message { /** * Dispatches a call to the appropriate {@code visit} method on {@link * mondrian.server.monitor.Visitor}. 
* * @param visitor Visitor */ public abstract void acceptWithoutResponse(Visitor visitor); } /** * Point for various clients in a request-response pattern to receive the response to their * requests. * * <p>The key type should test for object identity using the == operator, like {@link * java.util.WeakHashMap}. This allows responses to be automatically removed if the request (key) * is garbage collected. * * <p><b>Thread safety</b>. {@link #queue} is a thread-safe data structure; a thread can safely * call {@link #put} while another thread calls {@link #take}. The {@link #taken} map is not * thread safe, so you must lock the ResponseQueue before reading or writing it. * * <p>If requests are processed out of order, this queue is not ideal: until request #1 has * received its response, requests #2, #3 etc. will not receive their response. However, this is * not a problem for the monitor, which uses an actor model, processing requests in strict order. * * <p>REVIEW: This class is copy-pasted from {@link mondrian.server.monitor.Monitor}. Consider * abstracting common code. * * @param <K> request (key) type * @param <V> response (value) type */ private static class ResponseQueue<K, V> { private final BlockingQueue<Pair<K, V>> queue; /** * Entries that have been removed from the queue. If the request is garbage-collected, the map * entry is removed. */ private final Map<K, V> taken = new WeakHashMap<K, V>(); /** * Creates a ResponseQueue with given capacity. * * @param capacity Capacity */ public ResponseQueue(int capacity) { queue = new ArrayBlockingQueue<Pair<K, V>>(capacity); } /** * Places a (request, response) pair onto the queue. * * @param k Request * @param v Response * @throws InterruptedException if interrupted while waiting */ public void put(K k, V v) throws InterruptedException { queue.put(Pair.of(k, v)); } /** * Retrieves the response from the queue matching the given key, blocking until it is received. 
* * @param k Response * @return Response * @throws InterruptedException if interrupted while waiting */ public synchronized V take(K k) throws InterruptedException { final V v = taken.remove(k); if (v != null) { return v; } // Take the laundry out of the machine. If it's ours, leave with it. // If it's someone else's, fold it neatly and put it on the pile. for (; ; ) { final Pair<K, V> pair = queue.take(); if (pair.left.equals(k)) { return pair.right; } else { taken.put(pair.left, pair.right); } } } } /** Copy-pasted from {@link mondrian.server.monitor.Monitor}. Consider abstracting common code. */ private static class Actor implements Runnable { private final BlockingQueue<Pair<Handler, Message>> eventQueue = new ArrayBlockingQueue<Pair<Handler, Message>>(1000); private final ResponseQueue<Command<?>, Pair<Object, Throwable>> responseQueue = new ResponseQueue<Command<?>, Pair<Object, Throwable>>(1000); public void run() { try { for (; ; ) { final Pair<Handler, Message> entry = eventQueue.take(); final Handler handler = entry.left; final Message message = entry.right; try { // A message is either a command or an event. // A command returns a value that must be read by // the caller. if (message instanceof Command<?>) { Command<?> command = (Command<?>) message; try { Locus.push(command.getLocus()); Object result = command.call(); responseQueue.put(command, Pair.of(result, (Throwable) null)); } catch (PleaseShutdownException e) { responseQueue.put(command, Pair.of(null, (Throwable) null)); return; // exit event loop } catch (Throwable e) { responseQueue.put(command, Pair.of(null, e)); } finally { Locus.pop(command.getLocus()); } } else { Event event = (Event) message; event.acceptWithoutResponse(handler); // Broadcast the event to anyone who is interested. RolapUtil.MONITOR_LOGGER.debug(message); } } catch (Throwable e) { // REVIEW: Somewhere better to send it? e.printStackTrace(); } } } catch (InterruptedException e) { // REVIEW: Somewhere better to send it? 
e.printStackTrace(); } catch (Throwable e) { e.printStackTrace(); } } <T> T execute(Handler handler, Command<T> command) { try { eventQueue.put(Pair.<Handler, Message>of(handler, command)); } catch (InterruptedException e) { throw Util.newError(e, "Exception while executing " + command); } try { final Pair<Object, Throwable> pair = responseQueue.take(command); if (pair.right != null) { if (pair.right instanceof RuntimeException) { throw (RuntimeException) pair.right; } else if (pair.right instanceof Error) { throw (Error) pair.right; } else { throw new RuntimeException(pair.right); } } else { return (T) pair.left; } } catch (InterruptedException e) { throw Util.newError(e, "Exception while executing " + command); } } public void event(Handler handler, Event event) { try { eventQueue.put(Pair.<Handler, Message>of(handler, event)); } catch (InterruptedException e) { throw Util.newError(e, "Exception while executing " + event); } } } private static class SegmentLoadSucceededEvent extends Event { private final SegmentHeader header; private final SegmentBody body; private final long timestamp; private final RolapStar star; private final int serverId; private final int connectionId; private final long statementId; private final long executionId; private final Monitor monitor; public SegmentLoadSucceededEvent( long timestamp, Monitor monitor, int serverId, int connectionId, long statementId, long executionId, RolapStar star, SegmentHeader header, SegmentBody body) { this.timestamp = timestamp; this.monitor = monitor; this.serverId = serverId; this.connectionId = connectionId; this.statementId = statementId; this.executionId = executionId; assert header != null; assert star != null; this.star = star; this.header = header; this.body = body; // may be null } public void acceptWithoutResponse(Visitor visitor) { visitor.visit(this); } } private static class SegmentLoadFailedEvent extends Event { private final SegmentHeader header; private final Throwable throwable; private 
final long timestamp; private final RolapStar star; private final Monitor monitor; private final int serverId; private final int connectionId; private final long statementId; private final long executionId; public SegmentLoadFailedEvent( long timestamp, Monitor monitor, int serverId, int connectionId, long statementId, long executionId, RolapStar star, SegmentHeader header, Throwable throwable) { this.timestamp = timestamp; this.monitor = monitor; this.serverId = serverId; this.connectionId = connectionId; this.statementId = statementId; this.executionId = executionId; this.star = star; this.throwable = throwable; assert header != null; this.header = header; } public void acceptWithoutResponse(Visitor visitor) { visitor.visit(this); } } private static class SegmentRemoveEvent extends Event { private final SegmentHeader header; private final long timestamp; private final Monitor monitor; private final int serverId; private final int connectionId; private final long statementId; private final long executionId; private final RolapStar star; private final SegmentCacheManager cacheMgr; public SegmentRemoveEvent( long timestamp, Monitor monitor, int serverId, int connectionId, long statementId, long executionId, SegmentCacheManager cacheMgr, RolapStar star, SegmentHeader header) { this.timestamp = timestamp; this.monitor = monitor; this.serverId = serverId; this.connectionId = connectionId; this.statementId = statementId; this.executionId = executionId; this.cacheMgr = cacheMgr; this.star = star; assert header != null; this.header = header; } public void acceptWithoutResponse(Visitor visitor) { visitor.visit(this); } } private static class ExternalSegmentCreatedEvent extends Event { private final SegmentCacheManager cacheMgr; private final SegmentHeader header; private final long timestamp; private final Monitor monitor; private final int serverId; private final int connectionId; private final long statementId; private final long executionId; public 
ExternalSegmentCreatedEvent(
        long timestamp,
        Monitor monitor,
        int serverId,
        int connectionId,
        long statementId,
        long executionId,
        SegmentCacheManager cacheMgr,
        SegmentHeader header)
    {
        this.timestamp = timestamp;
        this.monitor = monitor;
        this.serverId = serverId;
        this.connectionId = connectionId;
        this.statementId = statementId;
        this.executionId = executionId;
        assert header != null;
        assert cacheMgr != null;
        this.cacheMgr = cacheMgr;
        this.header = header;
    }

    public void acceptWithoutResponse(Visitor visitor) {
        visitor.visit(this);
    }
}

/**
 * Event relating to a segment deleted from an external segment cache
 * (see the externalSegmentDeleted call in AsyncCacheListener).
 */
private static class ExternalSegmentDeletedEvent extends Event {
    private final SegmentCacheManager cacheMgr;
    private final SegmentHeader header;
    private final long timestamp;
    private final Monitor monitor;
    private final int serverId;
    private final int connectionId;
    private final long statementId;
    private final long executionId;

    public ExternalSegmentDeletedEvent(
        long timestamp,
        Monitor monitor,
        int serverId,
        int connectionId,
        long statementId,
        long executionId,
        SegmentCacheManager cacheMgr,
        SegmentHeader header)
    {
        this.timestamp = timestamp;
        this.monitor = monitor;
        this.serverId = serverId;
        this.connectionId = connectionId;
        this.statementId = statementId;
        this.executionId = executionId;
        assert header != null;
        assert cacheMgr != null;
        this.cacheMgr = cacheMgr;
        this.header = header;
    }

    public void acceptWithoutResponse(Visitor visitor) {
        visitor.visit(this);
    }
}

/**
 * Implementation of SegmentCacheListener that updates the segment index of
 * its aggregation manager instance when it receives events from its assigned
 * SegmentCache implementation.
*/
private static class AsyncCacheListener
    implements SegmentCache.SegmentCacheListener
{
    private final SegmentCacheManager cacheMgr;
    private final MondrianServer server;

    /**
     * Creates an AsyncCacheListener.
     *
     * @param cacheMgr Cache manager whose segment index is kept in sync
     * @param server Server on whose behalf external events are processed
     */
    public AsyncCacheListener(
        SegmentCacheManager cacheMgr,
        MondrianServer server)
    {
        this.cacheMgr = cacheMgr;
        this.server = server;
    }

    /**
     * Reacts to an event from the external segment cache. Locally-originated
     * events are ignored; remote ones are translated into commands and handed
     * to the cache manager for execution.
     */
    public void handle(final SegmentCacheEvent event) {
        if (event.isLocal()) {
            // This manager caused the event itself; nothing to update.
            return;
        }
        Locus.execute(
            Execution.NONE,
            "AsyncCacheListener.handle",
            new Locus.Action<Void>() {
                public Void execute() {
                    final Locus locus = Locus.peek();
                    final SegmentCacheManager.Command<Void> command;
                    switch (event.getEventType()) {
                    case ENTRY_CREATED:
                        command =
                            new Command<Void>() {
                                public Void call() {
                                    cacheMgr.externalSegmentCreated(
                                        event.getSource(), server);
                                    return null;
                                }

                                public Locus getLocus() {
                                    return locus;
                                }
                            };
                        break;
                    case ENTRY_DELETED:
                        command =
                            new Command<Void>() {
                                public Void call() {
                                    cacheMgr.externalSegmentDeleted(
                                        event.getSource(), server);
                                    return null;
                                }

                                public Locus getLocus() {
                                    return locus;
                                }
                            };
                        break;
                    default:
                        throw new UnsupportedOperationException();
                    }
                    cacheMgr.execute(command);
                    return null;
                }
            });
    }
}

/**
 * Makes a collection of {@link SegmentCacheWorker} objects (each of which is
 * backed by a {@link SegmentCache}) appear to be a SegmentCache.
 *
 * <p>For most operations, it is easier to operate on a single cache. It is
 * usually clear whether operations should quit when finding the first match,
 * or to operate on all workers. (For example, {@link #remove} tries to remove
 * the segment header from all workers, and returns whether it was removed
 * from any of them.) This class just does what seems most typical. If you
 * want another behavior for a particular operation, operate on the workers
 * directly.
*/ private static class CompositeSegmentCache implements SegmentCache { private final List<SegmentCacheWorker> workers; public CompositeSegmentCache(List<SegmentCacheWorker> workers) { this.workers = workers; } public SegmentBody get(SegmentHeader header) { for (SegmentCacheWorker worker : workers) { final SegmentBody body = worker.get(header); if (body != null) { return body; } } return null; } public boolean contains(SegmentHeader header) { for (SegmentCacheWorker worker : workers) { if (worker.contains(header)) { return true; } } return false; } public List<SegmentHeader> getSegmentHeaders() { // Special case 0 and 1 workers, for which the 'union' operation // is trivial. switch (workers.size()) { case 0: return Collections.emptyList(); case 1: return workers.get(0).getSegmentHeaders(); default: final List<SegmentHeader> list = new ArrayList<SegmentHeader>(); final Set<SegmentHeader> set = new HashSet<SegmentHeader>(); for (SegmentCacheWorker worker : workers) { for (SegmentHeader header : worker.getSegmentHeaders()) { if (set.add(header)) { list.add(header); } } } return list; } } public boolean put(SegmentHeader header, SegmentBody body) { for (SegmentCacheWorker worker : workers) { worker.put(header, body); } return true; } public boolean remove(SegmentHeader header) { boolean result = false; for (SegmentCacheWorker worker : workers) { if (worker.remove(header)) { result = true; } } return result; } public void tearDown() { // nothing } public void addListener(SegmentCacheListener listener) { // nothing } public void removeListener(SegmentCacheListener listener) { // nothing } public boolean supportsRichIndex() { return false; } } /** * Locates segments in the cache that satisfy a given request. * * <p>The result consists of (a) a list of segment headers, (b) a list of futures for segment * bodies that are currently being loaded, (c) converters to convert headers into {@link * SegmentWithData}. 
*
* <p>For (a), the client should call the cache to get the body for each
* segment header; it is possible that there is no body in the cache. For (b),
* the client will have to wait for the segment to arrive.
*/
private class PeekCommand
    implements SegmentCacheManager.Command<PeekResponse>
{
    private final CellRequest request;
    private final Locus locus;

    /**
     * Creates a PeekCommand.
     *
     * @param request Cell request
     * @param locus Locus
     */
    public PeekCommand(CellRequest request, Locus locus) {
        this.request = request;
        this.locus = locus;
    }

    public PeekResponse call() {
        final RolapStar.Measure measure = request.getMeasure();
        final RolapStar star = measure.getStar();
        final RolapSchema schema = star.getSchema();
        final AggregationKey key = new AggregationKey(request);
        // Ask the star's index for every header that could satisfy this
        // cell request.
        final List<SegmentHeader> headers =
            indexRegistry
                .getIndex(star)
                .locate(
                    schema.getName(),
                    schema.getChecksum(),
                    measure.getCubeName(),
                    measure.getName(),
                    star.getFactTable().getAlias(),
                    request.getConstrainedColumnsBitKey(),
                    request.getMappedCellValues(),
                    AggregationKey.getCompoundPredicateStringList(
                        star, key.getCompoundPredicateList()));
        final Map<SegmentHeader, Future<SegmentBody>> headerMap =
            new HashMap<SegmentHeader, Future<SegmentBody>>();
        final Map<List, SegmentBuilder.SegmentConverter> converterMap =
            new HashMap<List, SegmentBuilder.SegmentConverter>();
        // Is there a pending segment? (A segment that has been created and
        // is loading via SQL.)
        for (final SegmentHeader header : headers) {
            final Future<SegmentBody> bodyFuture =
                indexRegistry.getIndex(star).getFuture(header);
            if (bodyFuture != null) {
                // Check if the DataSourceChangeListener wants us to clear
                // the current segment
                if (star.getChangeListener() != null
                    && star.getChangeListener().isAggregationChanged(key))
                {
                    /*
                     * We can't satisfy this request, and we must clear the
                     * data from our cache. We clear it from the index
                     * first, then queue up a job in the background
                     * to remove the data from all the caches.
*/
                    indexRegistry.getIndex(star).remove(header);
                    // Best-effort background removal from the physical
                    // caches; failures are logged, not propagated.
                    Util.safeGet(
                        cacheExecutor.submit(
                            new Runnable() {
                                public void run() {
                                    try {
                                        compositeCache.remove(header);
                                    } catch (Throwable e) {
                                        LOGGER.warn(
                                            "remove header failed: " + header,
                                            e);
                                    }
                                }
                            }),
                        "SegmentCacheManager.peek");
                }
                converterMap.put(
                    SegmentCacheIndexImpl.makeConverterKey(header),
                    getConverter(star, header));
                headerMap.put(header, bodyFuture);
            }
        }
        return new PeekResponse(headerMap, converterMap);
    }

    public Locus getLocus() {
        return locus;
    }
}

/**
 * Immutable result of a PeekCommand: pending segment bodies keyed by header,
 * plus the converters needed to turn headers into segments.
 */
private static class PeekResponse {
    public final Map<SegmentHeader, Future<SegmentBody>> headerMap;
    public final Map<List, SegmentBuilder.SegmentConverter> converterMap;

    public PeekResponse(
        Map<SegmentHeader, Future<SegmentBody>> headerMap,
        Map<List, SegmentBuilder.SegmentConverter> converterMap)
    {
        this.headerMap = headerMap;
        this.converterMap = converterMap;
    }
}

/**
 * Registry of all the indexes that were created for this cache manager, per
 * {@link RolapStar}.
 */
public class SegmentCacheIndexRegistry {
    // Weak keys / soft values: entries disappear when the star is no longer
    // referenced elsewhere. Raw type as declared by commons-collections.
    private final Map<RolapStar, SegmentCacheIndex> indexes =
        new ReferenceMap(ReferenceMap.WEAK, ReferenceMap.SOFT);

    /** Returns the {@link SegmentCacheIndex} for a given {@link RolapStar}. */
    public SegmentCacheIndex getIndex(RolapStar star) {
        // NOTE(review): check-then-act is not atomic; assumed to be confined
        // to the actor thread — confirm.
        if (!indexes.containsKey(star)) {
            indexes.put(star, new SegmentCacheIndexImpl(thread));
        }
        return indexes.get(star);
    }

    /** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
    private SegmentCacheIndex getIndex(SegmentHeader header) {
        // First we check the indexes that already exist.
        // This is fast.
        for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
            final String factTableName =
                entry.getKey().getFactTable().getTableName();
            final ByteString schemaChecksum =
                entry.getKey().getSchema().getChecksum();
            if (!factTableName.equals(header.rolapStarFactTableName)) {
                continue;
            }
            if (!schemaChecksum.equals(header.schemaChecksum)) {
                continue;
            }
            return entry.getValue();
        }
        // The index doesn't exist.
Let's create it. for (RolapSchema schema : RolapSchema.getRolapSchemas()) { if (!schema.getChecksum().equals(header.schemaChecksum)) { continue; } // We have a schema match. RolapStar star = schema.getStar(header.rolapStarFactTableName); if (star != null) { // Found it. indexes.put(star, new SegmentCacheIndexImpl(thread)); } return indexes.get(star); } return null; } } }