public CFSecuritySecSessionBuff[] readDerivedByFinishIdx(
    CFSecurityAuthorization Authorization, UUID SecUserId, Calendar Finish) {
  final String S_ProcName = "CFSecurityRamSecSession.readDerivedByFinishIdx";
  CFSecuritySecSessionByFinishIdxKey key = schema.getFactorySecSession().newFinishIdxKey();
  key.setRequiredSecUserId(SecUserId);
  key.setOptionalFinish(Finish);
  CFSecuritySecSessionBuff[] recArray;
  if (dictByFinishIdx.containsKey(key)) {
    // Snapshot the index bucket for this key into a fixed-size array.
    Map<CFSecuritySecSessionPKey, CFSecuritySecSessionBuff> subdictFinishIdx =
        dictByFinishIdx.get(key);
    recArray = new CFSecuritySecSessionBuff[subdictFinishIdx.size()];
    Iterator<CFSecuritySecSessionBuff> iter = subdictFinishIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    // Cache an empty bucket for this key and return an empty array, never null.
    Map<CFSecuritySecSessionPKey, CFSecuritySecSessionBuff> subdictFinishIdx =
        new HashMap<CFSecuritySecSessionPKey, CFSecuritySecSessionBuff>();
    dictByFinishIdx.put(key, subdictFinishIdx);
    recArray = new CFSecuritySecSessionBuff[0];
  }
  return (recArray);
}
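// Usage sketch (illustrative only, not part of the generated API): every
// readDerivedBy*Idx method in this section returns a snapshot array for its
// index key, and an empty array rather than null when nothing matches, so
// callers can iterate or take .length unconditionally. The receiver type is
// taken from S_ProcName above; the parameter and variable names are assumptions.
public static int exampleCountSessionsByFinish(
    CFSecurityRamSecSession sessionTable,
    CFSecurityAuthorization auth,
    UUID secUserId,
    Calendar finish) {
  return sessionTable.readDerivedByFinishIdx(auth, secUserId, finish).length;
}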
public CFSecurityTSecGroupMemberBuff[] readDerivedByGroupIdx(
    CFSecurityAuthorization Authorization, long TenantId, int TSecGroupId) {
  final String S_ProcName = "CFSecurityRamTSecGroupMember.readDerivedByGroupIdx";
  CFSecurityTSecGroupMemberByGroupIdxKey key =
      schema.getFactoryTSecGroupMember().newGroupIdxKey();
  key.setRequiredTenantId(TenantId);
  key.setRequiredTSecGroupId(TSecGroupId);
  CFSecurityTSecGroupMemberBuff[] recArray;
  if (dictByGroupIdx.containsKey(key)) {
    Map<CFSecurityTSecGroupMemberPKey, CFSecurityTSecGroupMemberBuff> subdictGroupIdx =
        dictByGroupIdx.get(key);
    recArray = new CFSecurityTSecGroupMemberBuff[subdictGroupIdx.size()];
    Iterator<CFSecurityTSecGroupMemberBuff> iter = subdictGroupIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFSecurityTSecGroupMemberPKey, CFSecurityTSecGroupMemberBuff> subdictGroupIdx =
        new HashMap<CFSecurityTSecGroupMemberPKey, CFSecurityTSecGroupMemberBuff>();
    dictByGroupIdx.put(key, subdictGroupIdx);
    recArray = new CFSecurityTSecGroupMemberBuff[0];
  }
  return (recArray);
}
public CFSecuritySecGroupFormBuff[] readDerivedByFormIdx(
    CFSecurityAuthorization Authorization, long ClusterId, int SecFormId) {
  final String S_ProcName = "CFInternetRamSecGroupForm.readDerivedByFormIdx";
  CFSecuritySecGroupFormByFormIdxKey key = schema.getFactorySecGroupForm().newFormIdxKey();
  key.setRequiredClusterId(ClusterId);
  key.setRequiredSecFormId(SecFormId);
  CFSecuritySecGroupFormBuff[] recArray;
  if (dictByFormIdx.containsKey(key)) {
    Map<CFSecuritySecGroupFormPKey, CFSecuritySecGroupFormBuff> subdictFormIdx =
        dictByFormIdx.get(key);
    recArray = new CFSecuritySecGroupFormBuff[subdictFormIdx.size()];
    Iterator<CFSecuritySecGroupFormBuff> iter = subdictFormIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFSecuritySecGroupFormPKey, CFSecuritySecGroupFormBuff> subdictFormIdx =
        new HashMap<CFSecuritySecGroupFormPKey, CFSecuritySecGroupFormBuff>();
    dictByFormIdx.put(key, subdictFormIdx);
    recArray = new CFSecuritySecGroupFormBuff[0];
  }
  return (recArray);
}
public List<ICFAccTaxObj> readAllTax(boolean forceRead) {
  final String S_ProcName = "readAllTax";
  if ((allTax == null) || forceRead) {
    Map<CFAccTaxPKey, ICFAccTaxObj> map = new HashMap<CFAccTaxPKey, ICFAccTaxObj>();
    allTax = map;
    CFAccTaxBuff[] buffList =
        ((ICFAccSchema) schema.getBackingStore())
            .getTableTax()
            .readAllDerived(schema.getAuthorization());
    CFAccTaxBuff buff;
    ICFAccTaxObj obj;
    for (int idx = 0; idx < buffList.length; idx++) {
      buff = buffList[idx];
      obj = newInstance();
      obj.setPKey(((ICFAccSchema) schema.getBackingStore()).getFactoryTax().newPKey());
      obj.setBuff(buff);
      // realize() registers the instance in the cache as a side effect,
      // which is what populates allTax.
      ICFAccTaxObj realized = (ICFAccTaxObj) obj.realize();
    }
  }
  // Sort the cached objects by primary key, with nulls ordered first.
  Comparator<ICFAccTaxObj> cmp =
      new Comparator<ICFAccTaxObj>() {
        public int compare(ICFAccTaxObj lhs, ICFAccTaxObj rhs) {
          if (lhs == null) {
            if (rhs == null) {
              return (0);
            } else {
              return (-1);
            }
          } else if (rhs == null) {
            return (1);
          } else {
            CFAccTaxPKey lhsPKey = lhs.getPKey();
            CFAccTaxPKey rhsPKey = rhs.getPKey();
            int ret = lhsPKey.compareTo(rhsPKey);
            return (ret);
          }
        }
      };
  int len = allTax.size();
  ICFAccTaxObj arr[] = new ICFAccTaxObj[len];
  Iterator<ICFAccTaxObj> valIter = allTax.values().iterator();
  int idx = 0;
  while ((idx < len) && valIter.hasNext()) {
    arr[idx++] = valIter.next();
  }
  if (idx < len) {
    throw CFLib.getDefaultExceptionFactory()
        .newArgumentUnderflowException(getClass(), S_ProcName, 0, "idx", idx, len);
  } else if (valIter.hasNext()) {
    throw CFLib.getDefaultExceptionFactory()
        .newArgumentOverflowException(getClass(), S_ProcName, 0, "idx", idx, len);
  }
  Arrays.sort(arr, cmp);
  ArrayList<ICFAccTaxObj> arrayList = new ArrayList<ICFAccTaxObj>(len);
  for (idx = 0; idx < len; idx++) {
    arrayList.add(arr[idx]);
  }
  List<ICFAccTaxObj> sortedList = arrayList;
  return (sortedList);
}
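// Usage sketch (hypothetical caller, not from this source): readAllTax returns
// the cache contents sorted by primary key; passing true forces a reload from
// the backing store, while false reuses the in-memory allTax cache when present.
public void exampleListTaxes() {
  List<ICFAccTaxObj> cached = readAllTax(false); // served from cache when present
  List<ICFAccTaxObj> fresh = readAllTax(true); // always re-reads the backing store
  System.out.println("Cached: " + cached.size() + ", fresh: " + fresh.size());
}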
public CFGenKbISOCountryCurrencyBuff[] readDerivedByCurrencyIdx(
    CFGenKbAuthorization Authorization, short ISOCurrencyId) {
  final String S_ProcName = "CFGenKbRamISOCountryCurrency.readDerivedByCurrencyIdx() ";
  CFGenKbISOCountryCurrencyByCurrencyIdxKey key =
      schema.getFactoryISOCountryCurrency().newCurrencyIdxKey();
  key.setRequiredISOCurrencyId(ISOCurrencyId);
  CFGenKbISOCountryCurrencyBuff[] recArray;
  if (dictByCurrencyIdx.containsKey(key)) {
    Map<CFGenKbISOCountryCurrencyPKey, CFGenKbISOCountryCurrencyBuff> subdictCurrencyIdx =
        dictByCurrencyIdx.get(key);
    recArray = new CFGenKbISOCountryCurrencyBuff[subdictCurrencyIdx.size()];
    Iterator<CFGenKbISOCountryCurrencyBuff> iter = subdictCurrencyIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFGenKbISOCountryCurrencyPKey, CFGenKbISOCountryCurrencyBuff> subdictCurrencyIdx =
        new HashMap<CFGenKbISOCountryCurrencyPKey, CFGenKbISOCountryCurrencyBuff>();
    dictByCurrencyIdx.put(key, subdictCurrencyIdx);
    recArray = new CFGenKbISOCountryCurrencyBuff[0];
  }
  return (recArray);
}
@Test
public void getDistinctKeysAndCounts_SortByKeyDescending() {
  Connection connection = null;
  ResultSet resultSet = null;
  try {
    ConnectionManager connectionManager = temporaryFileDatabase.getConnectionManager(true);
    initWithTestData(connectionManager);
    connection = connectionManager.getConnection(null);
    resultSet = DBQueries.getDistinctKeysAndCounts(true, NAME, connection);
    Map<String, Integer> resultSetToMap = resultSetToMap(resultSet);
    assertEquals(3, resultSetToMap.size());
    // Keys must come back in descending order: gps, airbags, abs.
    Iterator<Map.Entry<String, Integer>> entriesIterator =
        resultSetToMap.entrySet().iterator();
    Map.Entry entry = entriesIterator.next();
    assertEquals("gps", entry.getKey());
    assertEquals(1, entry.getValue());
    entry = entriesIterator.next();
    assertEquals("airbags", entry.getKey());
    assertEquals(1, entry.getValue());
    entry = entriesIterator.next();
    assertEquals("abs", entry.getKey());
    assertEquals(2, entry.getValue());
  } finally {
    DBUtils.closeQuietly(resultSet);
    DBUtils.closeQuietly(connection);
  }
}
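// Assumed shape of the resultSetToMap(...) helper this test relies on (a
// hypothetical sketch; the real helper lives elsewhere in the test class).
// The entry-order assertions above only hold if it preserves row order,
// e.g. by collecting into a LinkedHashMap.
static Map<String, Integer> resultSetToMapSketch(ResultSet resultSet) {
  Map<String, Integer> map = new LinkedHashMap<String, Integer>();
  try {
    while (resultSet.next()) {
      map.put(resultSet.getString(1), resultSet.getInt(2));
    }
  } catch (SQLException e) {
    throw new IllegalStateException(e);
  }
  return map;
}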
public CFBamDelSubDep3Buff[] readDerivedByContDelDep2Idx(
    CFBamAuthorization Authorization, long ContTenantId, long ContDelDep2Id) {
  final String S_ProcName = "CFBamRamDelSubDep3.readDerivedByContDelDep2Idx() ";
  CFBamDelSubDep3ByContDelDep2IdxKey key = schema.getFactoryDelSubDep3().newContDelDep2IdxKey();
  key.setRequiredContTenantId(ContTenantId);
  key.setRequiredContDelDep2Id(ContDelDep2Id);
  CFBamDelSubDep3Buff[] recArray;
  if (dictByContDelDep2Idx.containsKey(key)) {
    Map<CFBamScopePKey, CFBamDelSubDep3Buff> subdictContDelDep2Idx =
        dictByContDelDep2Idx.get(key);
    recArray = new CFBamDelSubDep3Buff[subdictContDelDep2Idx.size()];
    Iterator<CFBamDelSubDep3Buff> iter = subdictContDelDep2Idx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFBamScopePKey, CFBamDelSubDep3Buff> subdictContDelDep2Idx =
        new HashMap<CFBamScopePKey, CFBamDelSubDep3Buff>();
    dictByContDelDep2Idx.put(key, subdictContDelDep2Idx);
    recArray = new CFBamDelSubDep3Buff[0];
  }
  return (recArray);
}
public void parseInputFile(File inputFile) throws IOException {
  geneFeatures.clear();
  otherRecords.clear();
  try {
    GFFEntrySet gffEntries = GFFTools.readGFF(inputFile);
    Iterator itr = gffEntries.lineIterator();
    int count = 0;
    int intronFeatures = 0;
    LinkedList<GFFRecord> cdsRecs = new LinkedList<GFFRecord>();

    // First pass: bin records into gene features, CDS records, and everything else.
    while (itr.hasNext()) {
      Object val = itr.next();
      if (val instanceof GFFRecord) {
        GFFRecord rec = (GFFRecord) val;
        count += 1;
        if (rec.getFeature().endsWith("gene")) {
          GeneFeatures gf = new GeneFeatures(rec);
          geneFeatures.put(gf.id, gf);
        } else if (rec.getFeature().equals("CDS")) {
          cdsRecs.addLast(rec);
        } else {
          otherRecords.add(rec);
        }
      }
    }

    // Second pass: attach each CDS record to its parent gene feature.
    for (GFFRecord rec : cdsRecs) {
      Map<String, List<String>> attrs = decodeAttrMap(rec);
      if (geneFeatures.containsKey(attrs.get("Parent").get(0))) {
        geneFeatures.get(attrs.get("Parent").get(0)).addCDS(rec, attrs);
      } else {
        System.err.println("Unknown CDS Parent: " + attrs.get("Parent").get(0));
      }
    }

    // Count gene features whose CDS is split into multiple segments (introns).
    for (String k : geneFeatures.keySet()) {
      GeneFeatures gf = geneFeatures.get(k);
      if (gf.cds != null && gf.cds.size() > 1) {
        intronFeatures++;
      }
    }

    System.err.println("# GFF Records: " + count);
    System.err.println("# Gene Feature Sets: " + geneFeatures.size());
    System.err.println("\t# Intron-Features: " + intronFeatures);
  } catch (ParserException e) {
    e.printStackTrace();
  } catch (BioException e) {
    e.printStackTrace();
  }
}
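// Hypothetical helper in the spirit of decodeAttrMap(...) above (an assumption,
// not the project's actual implementation): GFF3 attribute columns such as
// "ID=cds1;Parent=gene1,gene2" are semicolon-separated key=value pairs whose
// values may be comma-separated lists, which is why "Parent" maps to a List.
private static Map<String, List<String>> decodeAttrString(String attrField) {
  Map<String, List<String>> attrs = new HashMap<String, List<String>>();
  for (String pair : attrField.split(";")) {
    String[] kv = pair.trim().split("=", 2);
    if (kv.length == 2) {
      attrs.put(kv[0], Arrays.asList(kv[1].split(",")));
    }
  }
  return attrs;
}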
private static TopologyStats saveTopologyData(Customer cust, TopologyData3 td, Connection c)
    throws SQLException {
  normalizeTopolgyData(td);
  int sourceID = getSourceID(cust, td.getSourceName(), c);

  // first, create all the sites and build a map to determine the IDs for servers
  ExchSite3[] adminGroups = td.getAdminGroups();
  Map adminGroupIDs = saveSites(adminGroups, CustomerGroup.TYPE_ADMIN, sourceID, c);
  int adminCount = adminGroups.length;
  ExchSite3[] routingGroups = td.getRoutingGroups();
  Map routingGroupIDs = saveSites(routingGroups, CustomerGroup.TYPE_ROUTING, sourceID, c);
  int routingCount = routingGroups.length;

  // next, create all the servers
  Map serverIDs = saveServers(td, sourceID, adminGroupIDs, routingGroupIDs, c);
  int serverCount = serverIDs.size();

  // create all the stores
  Map storeIDs = saveStores(td, sourceID, td.getAdminGroups(), serverIDs, c);
  int storeCount = storeIDs.size();

  // finally, do any necessary deletes
  Set groupIDs = new HashSet((adminGroupIDs.size() + routingGroupIDs.size()) * 2 + 1);
  groupIDs.addAll(adminGroupIDs.values());
  groupIDs.addAll(routingGroupIDs.values());
  deleteTopologyData(c, sourceID, groupIDs, serverIDs.values(), storeIDs.values());

  TopologyStats ts = new TopologyStats();
  ts.setAdminGroupCount(adminCount);
  ts.setRoutingGroupCount(routingCount);
  ts.setServerCount(serverCount);
  ts.setStoreCount(storeCount);
  return ts;
}
// any number of users excluding itself
private static String getGrantees(int num) {
  StringBuffer aStr = new StringBuffer(" ");
  Map<String, String> userPasswd =
      (Map<String, String>) SQLBB.getBB().getSharedMap().get(SQLSecurityTest.userPasswdMap);
  userPasswd.remove("thr_" + RemoteTestModule.getCurrentThread().getThreadId());
  String[] users = new String[userPasswd.size()];
  userPasswd.keySet().toArray(users);
  int i = 0;
  while (i < num) {
    int x = SQLTest.random.nextInt(users.length);
    aStr.append(users[x] + ", ");
    i++;
  }
  if (aStr.charAt(aStr.length() - 2) == ',') {
    aStr.deleteCharAt(aStr.length() - 2); // drop the trailing comma
  }
  aStr.deleteCharAt(0); // delete the leading space
  return aStr.toString();
}
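// Usage sketch (hypothetical; the table name is an example, not from this
// source): embedding the grantee list in a GRANT statement. Note that
// getGrantees(num) samples with replacement, so the same user may appear
// more than once when num > 1.
private static String exampleGrantStatement() {
  return "grant select on trade.customers to " + getGrantees(2);
}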
public CFAccFeeBuff[] readDerivedByLedgerIdx(
    CFSecurityAuthorization Authorization, Long LedgerTenantId, Long LedgerId) {
  final String S_ProcName = "CFAccRamFee.readDerivedByLedgerIdx";
  CFAccFeeByLedgerIdxKey key = schema.getFactoryFee().newLedgerIdxKey();
  key.setOptionalLedgerTenantId(LedgerTenantId);
  key.setOptionalLedgerId(LedgerId);
  CFAccFeeBuff[] recArray;
  if (dictByLedgerIdx.containsKey(key)) {
    Map<CFAccFeePKey, CFAccFeeBuff> subdictLedgerIdx = dictByLedgerIdx.get(key);
    recArray = new CFAccFeeBuff[subdictLedgerIdx.size()];
    Iterator<CFAccFeeBuff> iter = subdictLedgerIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFAccFeePKey, CFAccFeeBuff> subdictLedgerIdx = new HashMap<CFAccFeePKey, CFAccFeeBuff>();
    dictByLedgerIdx.put(key, subdictLedgerIdx);
    recArray = new CFAccFeeBuff[0];
  }
  return (recArray);
}
public CFSecurityServiceBuff[] readDerivedByTypeIdx(
    CFSecurityAuthorization Authorization, int ServiceTypeId) {
  final String S_ProcName = "CFBamRamService.readDerivedByTypeIdx";
  CFSecurityServiceByTypeIdxKey key = schema.getFactoryService().newTypeIdxKey();
  key.setRequiredServiceTypeId(ServiceTypeId);
  CFSecurityServiceBuff[] recArray;
  if (dictByTypeIdx.containsKey(key)) {
    Map<CFSecurityServicePKey, CFSecurityServiceBuff> subdictTypeIdx = dictByTypeIdx.get(key);
    recArray = new CFSecurityServiceBuff[subdictTypeIdx.size()];
    Iterator<CFSecurityServiceBuff> iter = subdictTypeIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFSecurityServicePKey, CFSecurityServiceBuff> subdictTypeIdx =
        new HashMap<CFSecurityServicePKey, CFSecurityServiceBuff>();
    dictByTypeIdx.put(key, subdictTypeIdx);
    recArray = new CFSecurityServiceBuff[0];
  }
  return (recArray);
}
@Test
public void getDistinctKeysAndCounts() {
  Connection connection = null;
  ResultSet resultSet = null;
  try {
    ConnectionManager connectionManager = temporaryFileDatabase.getConnectionManager(true);
    initWithTestData(connectionManager);
    connection = connectionManager.getConnection(null);
    resultSet = DBQueries.getDistinctKeysAndCounts(false, NAME, connection);
    Map<String, Integer> resultSetToMap = resultSetToMap(resultSet);
    assertEquals(3, resultSetToMap.size());
    assertEquals(new Integer(2), resultSetToMap.get("abs"));
    assertEquals(new Integer(1), resultSetToMap.get("airbags"));
    assertEquals(new Integer(1), resultSetToMap.get("gps"));
  } finally {
    DBUtils.closeQuietly(resultSet);
    DBUtils.closeQuietly(connection);
  }
}
public CFAccFeeBuff[] readDerivedByDateIdx(
    CFSecurityAuthorization Authorization, long TenantId, long AccountId, Calendar FeeDate) {
  final String S_ProcName = "CFAccRamFee.readDerivedByDateIdx";
  CFAccFeeByDateIdxKey key = schema.getFactoryFee().newDateIdxKey();
  key.setRequiredTenantId(TenantId);
  key.setRequiredAccountId(AccountId);
  key.setRequiredFeeDate(FeeDate);
  CFAccFeeBuff[] recArray;
  if (dictByDateIdx.containsKey(key)) {
    Map<CFAccFeePKey, CFAccFeeBuff> subdictDateIdx = dictByDateIdx.get(key);
    recArray = new CFAccFeeBuff[subdictDateIdx.size()];
    Iterator<CFAccFeeBuff> iter = subdictDateIdx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFAccFeePKey, CFAccFeeBuff> subdictDateIdx = new HashMap<CFAccFeePKey, CFAccFeeBuff>();
    dictByDateIdx.put(key, subdictDateIdx);
    recArray = new CFAccFeeBuff[0];
  }
  return (recArray);
}
public CFSecurityISOLanguageBuff[] readDerivedByCode2Idx(
    CFSecurityAuthorization Authorization, String ISO6391Code) {
  final String S_ProcName = "CFInternetRamISOLanguage.readDerivedByCode2Idx";
  CFSecurityISOLanguageByCode2IdxKey key = schema.getFactoryISOLanguage().newCode2IdxKey();
  key.setOptionalISO6391Code(ISO6391Code);
  CFSecurityISOLanguageBuff[] recArray;
  if (dictByCode2Idx.containsKey(key)) {
    Map<CFSecurityISOLanguagePKey, CFSecurityISOLanguageBuff> subdictCode2Idx =
        dictByCode2Idx.get(key);
    recArray = new CFSecurityISOLanguageBuff[subdictCode2Idx.size()];
    Iterator<CFSecurityISOLanguageBuff> iter = subdictCode2Idx.values().iterator();
    int idx = 0;
    while (iter.hasNext()) {
      recArray[idx++] = iter.next();
    }
  } else {
    Map<CFSecurityISOLanguagePKey, CFSecurityISOLanguageBuff> subdictCode2Idx =
        new HashMap<CFSecurityISOLanguagePKey, CFSecurityISOLanguageBuff>();
    dictByCode2Idx.put(key, subdictCode2Idx);
    recArray = new CFSecurityISOLanguageBuff[0];
  }
  return (recArray);
}
/** INTERNAL: Conform the result if specified. */
protected Object conformResult(
    Object result,
    UnitOfWorkImpl unitOfWork,
    AbstractRecord arguments,
    boolean buildDirectlyFromRows) {
  if (getSelectionCriteria() != null) {
    ExpressionBuilder builder = getSelectionCriteria().getBuilder();
    builder.setSession(unitOfWork.getRootSession(null));
    builder.setQueryClass(getReferenceClass());
  }

  // If the query is redirected then the collection returned might no longer
  // correspond to the original container policy. CR#2342-S.M.
  ContainerPolicy cp;
  if (getRedirector() != null) {
    cp = ContainerPolicy.buildPolicyFor(result.getClass());
  } else {
    cp = getContainerPolicy();
  }

  // This code is now a great deal different... For one, registration is done
  // as part of conforming. Also, this should only be called if one actually
  // is conforming.
  // First scan the UnitOfWork for conforming instances.
  // This will walk through the entire cache of registered objects.
  // Let p be objects from result not in the cache.
  // Let c be objects from cache.
  // Presently p intersect c = empty set, but later p subset c.
  // By checking cache now doesConform will be called p fewer times.
  Map indexedInterimResult =
      unitOfWork.scanForConformingInstances(
          getSelectionCriteria(), getReferenceClass(), arguments, this);

  Cursor cursor = null;
  // In the case of cursors just conform/register the initially read collection.
  if (cp.isCursorPolicy()) {
    cursor = (Cursor) result;
    cp = ContainerPolicy.buildPolicyFor(ClassConstants.Vector_class);
    // In nested UnitOfWork session might have been session of the parent.
    cursor.setSession(unitOfWork);
    result = cursor.getObjectCollection();
    // for later incremental conforming...
    cursor.setInitiallyConformingIndex(indexedInterimResult);
    cursor.setSelectionCriteriaClone(getSelectionCriteria());
    cursor.setTranslationRow(arguments);
  }

  // Now conform the result from the database.
  // Remove any deleted or changed objects that no longer conform.
  // Deletes will only work for simple queries, queries with or's or anyof's may not return
  // correct results when untriggered indirection is in the model.
  Vector fromDatabase = null;

  // When building directly from rows, one of the performance benefits
  // is that we no longer have to wrap and then unwrap the originals.
  // result is just a vector, not a container of wrapped originals.
  if (buildDirectlyFromRows) {
    Vector rows = (Vector) result;
    fromDatabase = new Vector(rows.size());
    for (int i = 0; i < rows.size(); i++) {
      Object object = rows.elementAt(i);
      // null is placed in the row collection for 1-m joining to filter duplicate rows.
      if (object != null) {
        Object clone =
            conformIndividualResult(
                object,
                unitOfWork,
                arguments,
                getSelectionCriteria(),
                indexedInterimResult,
                buildDirectlyFromRows);
        if (clone != null) {
          fromDatabase.addElement(clone);
        }
      }
    }
  } else {
    fromDatabase = new Vector(cp.sizeFor(result));
    AbstractSession sessionToUse = unitOfWork.getParent();
    for (Object iter = cp.iteratorFor(result); cp.hasNext(iter); ) {
      Object object = cp.next(iter, sessionToUse);
      Object clone =
          conformIndividualResult(
              object,
              unitOfWork,
              arguments,
              getSelectionCriteria(),
              indexedInterimResult,
              buildDirectlyFromRows);
      if (clone != null) {
        fromDatabase.addElement(clone);
      }
    }
  }

  // Now add the unwrapped conforming instances into an appropriate container.
  // Wrapping is done automatically.
  // Make sure a vector of exactly the right size is returned.
  Object conformedResult =
      cp.containerInstance(indexedInterimResult.size() + fromDatabase.size());
  Object eachClone;
  for (Iterator enumtr = indexedInterimResult.values().iterator(); enumtr.hasNext(); ) {
    eachClone = enumtr.next();
    cp.addInto(eachClone, conformedResult, unitOfWork);
  }
  for (Enumeration enumtr = fromDatabase.elements(); enumtr.hasMoreElements(); ) {
    eachClone = enumtr.nextElement();
    cp.addInto(eachClone, conformedResult, unitOfWork);
  }

  if (cursor != null) {
    cursor.setObjectCollection((Vector) conformedResult);
    // For nested UOW must copy all in object collection to
    // initiallyConformingIndex, as some of these could have been from
    // the parent UnitOfWork.
    if (unitOfWork.isNestedUnitOfWork()) {
      for (Enumeration enumtr = cursor.getObjectCollection().elements();
          enumtr.hasMoreElements(); ) {
        Object clone = enumtr.nextElement();
        indexedInterimResult.put(clone, clone);
      }
    }
    return cursor;
  } else {
    return conformedResult;
  }
}
public MutationState createTable(CreateTableStatement statement, byte[][] splits)
    throws SQLException {
  PTableType tableType = statement.getTableType();
  boolean isView = tableType == PTableType.VIEW;
  if (isView && !statement.getProps().isEmpty()) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
        .build()
        .buildException();
  }
  connection.rollback();
  boolean wasAutoCommit = connection.getAutoCommit();
  try {
    connection.setAutoCommit(false);
    TableName tableNameNode = statement.getTableName();
    String schemaName = tableNameNode.getSchemaName();
    String tableName = tableNameNode.getTableName();

    PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
    String pkName = null;
    Set<String> pkColumns = Collections.<String>emptySet();
    Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
    if (pkConstraint != null) {
      pkColumns = pkConstraint.getColumnNames();
      pkColumnsIterator = pkColumns.iterator();
      pkName = pkConstraint.getName();
    }

    List<ColumnDef> colDefs = statement.getColumnDefs();
    List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
    PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
    int columnOrdinal = 0;
    Map<String, PName> familyNames = Maps.newLinkedHashMap();
    boolean isPK = false;
    for (ColumnDef colDef : colDefs) {
      if (colDef.isPK()) {
        if (isPK) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
              .setColumnName(colDef.getColumnDefName().getColumnName().getName())
              .build()
              .buildException();
        }
        isPK = true;
      }
      PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
      if (SchemaUtil.isPKColumn(column)) {
        // TODO: remove this constraint?
        if (!pkColumns.isEmpty()
            && !column.getName().getString().equals(pkColumnsIterator.next())) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .setColumnName(column.getName().getString())
              .build()
              .buildException();
        }
      }
      columns.add(column);
      if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .setColumnName(column.getName().getString())
            .build()
            .buildException();
      }
      if (column.getFamilyName() != null) {
        familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
      }
    }
    if (!isPK && pkColumns.isEmpty()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
          .setSchemaName(schemaName)
          .setTableName(tableName)
          .build()
          .buildException();
    }

    List<Pair<byte[], Map<String, Object>>> familyPropList =
        Lists.newArrayListWithExpectedSize(familyNames.size());
    Map<String, Object> commonFamilyProps = Collections.emptyMap();
    Map<String, Object> tableProps = Collections.emptyMap();
    if (!statement.getProps().isEmpty()) {
      if (statement.isView()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
            .build()
            .buildException();
      }
      for (String familyName : statement.getProps().keySet()) {
        if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
          if (familyNames.get(familyName) == null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                .setFamilyName(familyName)
                .build()
                .buildException();
          }
        }
      }
      commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
      tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

      Collection<Pair<String, Object>> props =
          statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
      // Somewhat hacky way of determining if property is for HColumnDescriptor or
      // HTableDescriptor
      HColumnDescriptor defaultDescriptor =
          new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
      for (Pair<String, Object> prop : props) {
        if (defaultDescriptor.getValue(prop.getFirst()) != null) {
          commonFamilyProps.put(prop.getFirst(), prop.getSecond());
        } else {
          tableProps.put(prop.getFirst(), prop.getSecond());
        }
      }
    }

    for (PName familyName : familyNames.values()) {
      Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
      if (props.isEmpty()) {
        familyPropList.add(
            new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
      } else {
        Map<String, Object> combinedFamilyProps =
            Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
        combinedFamilyProps.putAll(commonFamilyProps);
        for (Pair<String, Object> prop : props) {
          combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
        }
        familyPropList.add(
            new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
      }
    }

    // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
    if (tableType == PTableType.SYSTEM) {
      PTable table =
          new PTableImpl(
              new PNameImpl(tableName),
              tableType,
              MetaDataProtocol.MIN_TABLE_TIMESTAMP,
              0,
              QueryConstants.SYSTEM_TABLE_PK_NAME,
              null,
              columns);
      connection.addTable(schemaName, table);
    }

    for (PColumn column : columns) {
      addColumnMutation(schemaName, tableName, column, colUpsert);
    }

    Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
    if (saltBucketNum != null
        && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
          .build()
          .buildException();
    }

    PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
    tableUpsert.setString(1, schemaName);
    tableUpsert.setString(2, tableName);
    tableUpsert.setString(3, tableType.getSerializedValue());
    tableUpsert.setInt(4, 0);
    tableUpsert.setInt(5, columnOrdinal);
    if (saltBucketNum != null) {
      tableUpsert.setInt(6, saltBucketNum);
    } else {
      tableUpsert.setNull(6, Types.INTEGER);
    }
    tableUpsert.setString(7, pkName);
    tableUpsert.execute();

    final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
    connection.rollback();

    MetaDataMutationResult result =
        connection
            .getQueryServices()
            .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
    MutationCode code = result.getMutationCode();
    switch (code) {
      case TABLE_ALREADY_EXISTS:
        connection.addTable(schemaName, result.getTable());
        if (!statement.ifNotExists()) {
          throw new TableAlreadyExistsException(schemaName, tableName);
        }
        break;
      case NEWER_TABLE_FOUND:
        // TODO: add table if in result?
        throw new NewerTableAlreadyExistsException(schemaName, tableName);
      case UNALLOWED_TABLE_MUTATION:
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .build()
            .buildException();
      default:
        PTable table =
            new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                result.getMutationTime(),
                0,
                pkName,
                saltBucketNum,
                columns);
        connection.addTable(schemaName, table);
        if (tableType == PTableType.USER) {
          connection.setAutoCommit(true);
          // Delete everything in the column. You'll still be able to do queries at earlier
          // timestamps
          Long scn = connection.getSCN();
          long ts = (scn == null ? result.getMutationTime() : scn);
          PSchema schema =
              new PSchemaImpl(
                  schemaName,
                  ImmutableMap.<String, PTable>of(table.getName().getString(), table));
          TableRef tableRef = new TableRef(null, table, schema, ts);
          byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
          MutationPlan plan =
              new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
          return connection.getQueryServices().updateData(plan);
        }
        break;
    }
    return new MutationState(0, connection);
  } finally {
    connection.setAutoCommit(wasAutoCommit);
  }
}
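// Illustrative only (assumed JDBC-level usage; the URL and DDL text are
// examples, not taken from this source). createTable(...) above is normally
// reached via the driver when CREATE TABLE DDL is executed; the SALT_BUCKETS
// property corresponds to the saltBucketNum value validated above.
public static void exampleCreateTable(java.sql.Connection conn) throws SQLException {
  conn.createStatement()
      .execute(
          "CREATE TABLE IF NOT EXISTS my_schema.my_table ("
              + " id BIGINT NOT NULL PRIMARY KEY, name VARCHAR)"
              + " SALT_BUCKETS = 4");
}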
private static Map saveServers(
    TopologyData3 td, int sourceID, Map adminGroupIDs, Map routingGroupIDs, Connection c)
    throws SQLException {
  PreparedStatement insert = null;
  PreparedStatement update = null;
  try {
    insert =
        c.prepareStatement(
            "insert into dat_customer_servers (server_id, source_id, admin_group_id, routing_group_id, internal_name, display_name, sink_capable, sink_enabled, server_role, server_version, cloud_service_id) "
                + "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
    update =
        c.prepareStatement(
            "update dat_customer_servers "
                + "set source_id = ?, admin_group_id = ?, routing_group_id = ?, internal_name = ?, "
                + " display_name = ?, sink_capable = ?, server_role = ?, server_version = ?, is_deleted = false, purge_time = NULL, cloud_service_id = ? "
                + "where server_id = ?");
    Map serverIDs = new HashMap(adminGroupIDs.size() * 8 + 1);
    ExchSite3[] adminGroups = td.getAdminGroups();
    ExchSite3[] routingGroups = td.getRoutingGroups();
    for (int i = 0; i < adminGroups.length; i++) {
      ExchSite3 adminGroup = adminGroups[i];
      int adminGroupID = ((Number) adminGroupIDs.get(adminGroup.getInternalName())).intValue();
      ExchServer3[] servers = adminGroup.getServers();
      for (int j = 0; j < servers.length; j++) {
        ExchServer3 server = servers[j];
        int serverID = queryLookupServerId(sourceID, server.getInternalName(), c);
        int routingGroupID =
            findRoutingGroupID(server.getInternalName(), routingGroups, routingGroupIDs);
        // for non-sinkable servers, attempt to parse cloud service from version field
        CloudService cloudService = CloudService.NONE;
        if (!server.isSinkCapable()) {
          Matcher matcher = SERVER_VERSION_PATTERN.matcher(server.getServerVersion());
          if (matcher.matches()) {
            cloudService = CloudService.valueOfCaseInsensitive(matcher.group(1), cloudService);
          }
        }
        if (serverID == 0) {
          // if we couldn't find an existing serverID, grab the next one from the sequence
          serverID = getNextFromSequence("seq_server_id", c);
          serverIDs.put(server.getInternalName(), serverID);
          insert.setInt(1, serverID);
          insert.setInt(2, sourceID);
          insert.setInt(3, adminGroupID);
          insert.setInt(4, routingGroupID);
          insert.setString(
              5,
              DirectoryUtils.truncateString(
                  server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
          insert.setString(
              6,
              DirectoryUtils.truncateString(
                  server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
          insert.setInt(7, server.isSinkCapable() ? 1 : 0);
          // by default, all non-cloud servers are sink-enabled (so if they become
          // e2k servers, they're enabled)
          insert.setInt(8, cloudService.getId() <= CloudService.ONPREMISES.getId() ? 1 : 0);
          insert.setInt(9, server.getServerRole());
          insert.setString(
              10,
              DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
          insert.setInt(11, cloudService.getId());
          insert.executeUpdate();
        } else {
          // if there is an existing serverID, update it (preserve value of sink_enabled)
          serverIDs.put(server.getInternalName(), serverID);
          update.setInt(1, sourceID);
          update.setInt(2, adminGroupID);
          update.setInt(3, routingGroupID);
          update.setString(
              4,
              DirectoryUtils.truncateString(
                  server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
          update.setString(
              5,
              DirectoryUtils.truncateString(
                  server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
          update.setInt(6, server.isSinkCapable() ? 1 : 0);
          update.setInt(7, server.getServerRole());
          update.setString(
              8,
              DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
          update.setInt(9, cloudService.getId());
          update.setInt(10, serverID);
          update.executeUpdate();
        }
      }
    }
    return serverIDs;
  } finally {
    if (update != null) {
      update.close();
    }
    if (insert != null) {
      insert.close();
    }
  }
}
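// Hypothetical convenience lookup (an assumption, not in this source): the raw
// Map returned by saveServers(...) keys boxed server IDs by internal name, so
// callers can narrow through Number the same way saveTopologyData(...) narrows
// the group-ID maps above.
private static int exampleServerIdFor(Map serverIDs, String internalName) {
  Number id = (Number) serverIDs.get(internalName);
  return (id == null) ? 0 : id.intValue();
}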
public List<ICFAccTaxObj> readTaxByTenantIdx(long TenantId, boolean forceRead) {
  final String S_ProcName = "readTaxByTenantIdx";
  CFAccTaxByTenantIdxKey key =
      ((ICFAccSchema) schema.getBackingStore()).getFactoryTax().newTenantIdxKey();
  key.setRequiredTenantId(TenantId);
  Map<CFAccTaxPKey, ICFAccTaxObj> dict;
  if (indexByTenantIdx == null) {
    indexByTenantIdx = new HashMap<CFAccTaxByTenantIdxKey, Map<CFAccTaxPKey, ICFAccTaxObj>>();
  }
  if ((!forceRead) && indexByTenantIdx.containsKey(key)) {
    dict = indexByTenantIdx.get(key);
  } else {
    dict = new HashMap<CFAccTaxPKey, ICFAccTaxObj>();
    // Allow other threads to dirty-read while we're loading
    indexByTenantIdx.put(key, dict);
    ICFAccTaxObj obj;
    CFAccTaxBuff[] buffList =
        ((ICFAccSchema) schema.getBackingStore())
            .getTableTax()
            .readDerivedByTenantIdx(schema.getAuthorization(), TenantId);
    CFAccTaxBuff buff;
    for (int idx = 0; idx < buffList.length; idx++) {
      buff = buffList[idx];
      obj = schema.getTaxTableObj().newInstance();
      obj.setPKey(((ICFAccSchema) schema.getBackingStore()).getFactoryTax().newPKey());
      obj.setBuff(buff);
      // realize() registers the instance in the cache as a side effect,
      // which is what populates the index dictionary.
      ICFAccTaxObj realized = (ICFAccTaxObj) obj.realize();
    }
  }
  // Sort the indexed objects by primary key, with nulls ordered first.
  Comparator<ICFAccTaxObj> cmp =
      new Comparator<ICFAccTaxObj>() {
        public int compare(ICFAccTaxObj lhs, ICFAccTaxObj rhs) {
          if (lhs == null) {
            if (rhs == null) {
              return (0);
            } else {
              return (-1);
            }
          } else if (rhs == null) {
            return (1);
          } else {
            CFAccTaxPKey lhsPKey = lhs.getPKey();
            CFAccTaxPKey rhsPKey = rhs.getPKey();
            int ret = lhsPKey.compareTo(rhsPKey);
            return (ret);
          }
        }
      };
  int len = dict.size();
  ICFAccTaxObj arr[] = new ICFAccTaxObj[len];
  Iterator<ICFAccTaxObj> valIter = dict.values().iterator();
  int idx = 0;
  while ((idx < len) && valIter.hasNext()) {
    arr[idx++] = valIter.next();
  }
  if (idx < len) {
    throw CFLib.getDefaultExceptionFactory()
        .newArgumentUnderflowException(getClass(), S_ProcName, 0, "idx", idx, len);
  } else if (valIter.hasNext()) {
    throw CFLib.getDefaultExceptionFactory()
        .newArgumentOverflowException(getClass(), S_ProcName, 0, "idx", idx, len);
  }
  Arrays.sort(arr, cmp);
  ArrayList<ICFAccTaxObj> arrayList = new ArrayList<ICFAccTaxObj>(len);
  for (idx = 0; idx < len; idx++) {
    arrayList.add(arr[idx]);
  }
  List<ICFAccTaxObj> sortedList = arrayList;
  return (sortedList);
}
@Override
public int size() {
  check();
  return map.size();
}