@NotNull
private Collection<PsiElement> getDeclarationsByDescriptor(
        @NotNull DeclarationDescriptor declarationDescriptor) {
    Collection<PsiElement> declarations;
    if (declarationDescriptor instanceof PackageViewDescriptor) {
        final PackageViewDescriptor aPackage = (PackageViewDescriptor) declarationDescriptor;
        Collection<JetFile> files = trace.get(BindingContext.PACKAGE_TO_FILES, aPackage.getFqName());
        if (files == null) {
            // The package may be defined outside of Kotlin sources, e.g. in a library or in Java code.
            return Collections.emptyList();
        }
        declarations = Collections2.transform(files, new Function<JetFile, PsiElement>() {
            @Override
            public PsiElement apply(@Nullable JetFile file) {
                assert file != null : "File is null for package " + aPackage;
                return file.getPackageDirective().getNameIdentifier();
            }
        });
    }
    else {
        declarations = Collections.singletonList(
                BindingContextUtils.descriptorToDeclaration(trace.getBindingContext(), declarationDescriptor));
    }
    return declarations;
}
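// Note on the transform above: Guava's Collections2.transform returns a lazy, live
// view -- the Function runs on every access, not once up front. A minimal,
// self-contained sketch of that behavior (plain JDK + Guava, no compiler types):
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
import java.util.Arrays;
import java.util.Collection;

class TransformViewDemo {
    public static void main(String[] args) {
        Collection<Integer> lengths = Collections2.transform(
                Arrays.asList("a", "bb", "ccc"), new Function<String, Integer>() {
                    @Override
                    public Integer apply(String s) {
                        return s.length(); // evaluated lazily, on each iteration
                    }
                });
        System.out.println(lengths); // [1, 2, 3]
    }
}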
public PTableImpl(long timeStamp) { // For delete marker
    this.type = PTableType.USER;
    this.timeStamp = timeStamp;
    this.pkColumns = this.allColumns = Collections.emptyList();
    this.families = Collections.emptyList();
    this.familyByBytes = Collections.emptyMap();
    this.familyByString = Collections.emptyMap();
    this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA;
}
@Commit
@SuppressWarnings("unused")
private void afterDeserialization() throws Exception {
    if (otherAttributesForSerialization != null) {
        attributes.putAll(SimpleXmlWrappers.unwrap(otherAttributesForSerialization));
    }
    phrasesView = Collections.unmodifiableList(phrases);
    subclustersView = Collections.unmodifiableList(subclusters);
    // Documents will be restored on the ProcessingResult level.
}
public Token getSuccessor(Token token) {
    List<Token> tokens = sortedTokens();
    int index = Collections.binarySearch(tokens, token);
    assert index >= 0 : token + " not found in " + StringUtils.join(tokenToEndPointMap.keySet(), ", ");
    // Wrap around to the first token when asked for the successor of the last one.
    return index == tokens.size() - 1 ? tokens.get(0) : tokens.get(index + 1);
}
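// A minimal, self-contained sketch of the wraparound lookup above, using Long
// stand-ins for Token: the successor of the last element is the first one.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class RingSuccessorDemo {
    static Long successor(List<Long> sortedRing, Long token) {
        int index = Collections.binarySearch(sortedRing, token);
        assert index >= 0 : token + " not found in ring";
        // Wrap around: the element after the last one is the first.
        return index == sortedRing.size() - 1 ? sortedRing.get(0) : sortedRing.get(index + 1);
    }

    public static void main(String[] args) {
        List<Long> ring = Arrays.asList(10L, 20L, 30L);
        System.out.println(successor(ring, 20L)); // 30
        System.out.println(successor(ring, 30L)); // 10 (wraps around)
    }
}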
@NonNull
public IAndroidTarget[] getMissingTargets() {
    synchronized (mLocalPackages) {
        if (mCachedMissingTargets == null) {
            Map<MissingTarget, MissingTarget> result = Maps.newHashMap();
            Set<ISystemImage> seen = Sets.newHashSet();
            for (IAndroidTarget target : getTargets()) {
                Collections.addAll(seen, target.getSystemImages());
            }
            for (LocalPkgInfo local : getPkgsInfos(PkgType.PKG_ADDON_SYS_IMAGE)) {
                LocalAddonSysImgPkgInfo info = (LocalAddonSysImgPkgInfo) local;
                ISystemImage image = info.getSystemImage();
                if (!seen.contains(image)) {
                    addOrphanedSystemImage(image, info.getDesc(), result);
                }
            }
            for (LocalPkgInfo local : getPkgsInfos(PkgType.PKG_SYS_IMAGE)) {
                LocalSysImgPkgInfo info = (LocalSysImgPkgInfo) local;
                ISystemImage image = info.getSystemImage();
                if (!seen.contains(image)) {
                    addOrphanedSystemImage(image, info.getDesc(), result);
                }
            }
            mCachedMissingTargets = result.keySet();
        }
        return mCachedMissingTargets.toArray(new IAndroidTarget[mCachedMissingTargets.size()]);
    }
}
private List<String> getCurrentServerIds(boolean nag, boolean lagged) {
    try (Jedis jedis = pool.getResource()) {
        long time = getRedisTime(jedis.time());
        int nagTime = 0;
        if (nag) {
            nagTime = nagAboutServers.decrementAndGet();
            if (nagTime <= 0) {
                nagAboutServers.set(10);
            }
        }
        ImmutableList.Builder<String> servers = ImmutableList.builder();
        Map<String, String> heartbeats = jedis.hgetAll("heartbeats");
        for (Map.Entry<String, String> entry : heartbeats.entrySet()) {
            try {
                long stamp = Long.parseLong(entry.getValue());
                // A server is "current" if its heartbeat is at most 30 seconds old;
                // with lagged == true the test is inverted to select stale servers instead.
                if (lagged ? time >= stamp + 30 : time <= stamp + 30) {
                    servers.add(entry.getKey());
                }
                else if (nag && nagTime <= 0) {
                    getLogger().severe(entry.getKey() + " is " + (time - stamp)
                            + " seconds behind! (Time not synchronized or server down?)");
                }
            } catch (NumberFormatException ignored) {
            }
        }
        return servers.build();
    } catch (JedisConnectionException e) {
        getLogger().log(Level.SEVERE, "Unable to fetch server IDs", e);
        return Collections.singletonList(configuration.getServerId());
    }
}
private static List<FunctionDescriptor> getSuperFunctionsForMethod(
        @NotNull PsiMethodWrapper method,
        @NotNull BindingTrace trace,
        @NotNull ClassDescriptor containingClass) {
    List<FunctionDescriptor> superFunctions = Lists.newArrayList();
    Map<ClassDescriptor, JetType> superclassToSupertype = getSuperclassToSupertypeMap(containingClass);
    Multimap<FqName, Pair<FunctionDescriptor, PsiMethod>> superclassToFunctions =
            getSuperclassToFunctionsMultimap(method, trace.getBindingContext(), containingClass);
    for (HierarchicalMethodSignature superSignature :
            method.getPsiMethod().getHierarchicalMethodSignature().getSuperSignatures()) {
        PsiMethod superMethod = superSignature.getMethod();
        PsiClass psiClass = superMethod.getContainingClass();
        assert psiClass != null;
        String classFqNameString = psiClass.getQualifiedName();
        assert classFqNameString != null;
        FqName classFqName = new FqName(classFqNameString);

        if (!JavaToKotlinClassMap.getInstance().mapPlatformClass(classFqName).isEmpty()) {
            for (FunctionDescriptor superFun :
                    JavaToKotlinMethodMap.INSTANCE.getFunctions(superMethod, containingClass)) {
                superFunctions.add(substituteSuperFunction(superclassToSupertype, superFun));
            }
            continue;
        }

        DeclarationDescriptor superFun = superMethod instanceof JetClsMethod
                ? trace.get(BindingContext.DECLARATION_TO_DESCRIPTOR, ((JetClsMethod) superMethod).getOrigin())
                : findSuperFunction(superclassToFunctions.get(classFqName), superMethod);
        if (superFun == null) {
            reportCantFindSuperFunction(method);
            continue;
        }
        assert superFun instanceof FunctionDescriptor : superFun.getClass().getName();
        superFunctions.add(substituteSuperFunction(superclassToSupertype, (FunctionDescriptor) superFun));
    }

    // Sorting for diagnostic stability.
    Collections.sort(superFunctions, new Comparator<FunctionDescriptor>() {
        @Override
        public int compare(FunctionDescriptor fun1, FunctionDescriptor fun2) {
            FqNameUnsafe fqName1 = getFQName(fun1.getContainingDeclaration());
            FqNameUnsafe fqName2 = getFQName(fun2.getContainingDeclaration());
            return fqName1.getFqName().compareTo(fqName2.getFqName());
        }
    });
    return superFunctions;
}
/**
 * Gets the timestamp that DateTieredCompactionStrategy considers to be the "current time".
 *
 * @return the maximum timestamp across all SSTables
 * @throws java.util.NoSuchElementException if there are no SSTables
 */
private long getNow() {
    return Collections.max(cfs.getSSTables(), new Comparator<SSTableReader>() {
        public int compare(SSTableReader o1, SSTableReader o2) {
            return Long.compare(o1.getMaxTimestamp(), o2.getMaxTimestamp());
        }
    }).getMaxTimestamp();
}
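// On Java 8+ the anonymous Comparator in getNow() could be written as a method
// reference with identical behavior -- a sketch, assuming the same SSTableReader API:
//
//     return Collections.max(cfs.getSSTables(),
//             Comparator.comparingLong(SSTableReader::getMaxTimestamp)).getMaxTimestamp();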
public <T> Enumerator<T> executeQuery(Queryable<T> queryable) {
    try {
        OptiqStatement statement = (OptiqStatement) createStatement();
        OptiqPrepare.PrepareResult<T> enumerable = statement.prepare(queryable);
        final DataContext dataContext = createDataContext(Collections.emptyList());
        return enumerable.enumerator(dataContext);
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
}
public static int firstTokenIndex(final ArrayList<Token> ring, Token start, boolean insertMin) {
    assert ring.size() > 0;
    // Insert the minimum token (at index == -1) if we were asked to include it
    // and it isn't a member of the ring.
    int i = Collections.binarySearch(ring, start);
    if (i < 0) {
        // binarySearch returns -(insertionPoint) - 1 for a missing key.
        i = (i + 1) * (-1);
        if (i >= ring.size())
            i = insertMin ? -1 : 0;
    }
    return i;
}
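// Collections.binarySearch returns -(insertionPoint) - 1 when the key is absent,
// so the (i + 1) * (-1) above recovers the insertion point. A quick standalone check:
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class BinarySearchDemo {
    public static void main(String[] args) {
        List<Integer> ring = Arrays.asList(10, 20, 30);
        int i = Collections.binarySearch(ring, 25); // -3: 25 is absent
        System.out.println((i + 1) * (-1));         // 2: 25 would sit just before 30
    }
}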
public Collection<InetAddress> pendingEndpointsFor(Token token, String keyspaceName) {
    Map<Range<Token>, Collection<InetAddress>> ranges = getPendingRanges(keyspaceName);
    if (ranges.isEmpty())
        return Collections.emptyList();

    Set<InetAddress> endpoints = new HashSet<InetAddress>();
    for (Map.Entry<Range<Token>, Collection<InetAddress>> entry : ranges.entrySet()) {
        if (entry.getKey().contains(token))
            endpoints.addAll(entry.getValue());
    }
    return endpoints;
}
public PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint)
        throws SQLException {
    try {
        Set<String> pkColumnNames =
                pkConstraint == null ? Collections.<String>emptySet() : pkConstraint.getColumnNames();
        String columnName = def.getColumnDefName().getColumnName().getName();
        PName familyName = null;
        if (def.isPK() && !pkColumnNames.isEmpty()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                    .setColumnName(columnName)
                    .build()
                    .buildException();
        }
        boolean isPK = def.isPK() || pkColumnNames.contains(columnName);
        if (def.getColumnDefName().getFamilyName() != null) {
            String family = def.getColumnDefName().getFamilyName().getName();
            if (isPK) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME)
                        .setColumnName(columnName)
                        .setFamilyName(family)
                        .build()
                        .buildException();
            } else if (!def.isNull()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL)
                        .setColumnName(columnName)
                        .setFamilyName(family)
                        .build()
                        .buildException();
            }
            familyName = new PNameImpl(family);
        } else if (!isPK) {
            familyName = QueryConstants.DEFAULT_COLUMN_FAMILY_NAME;
        }
        ColumnModifier columnModifier = def.getColumnModifier();
        if (pkConstraint != null && pkConstraint.getColumnModifier(columnName) != null) {
            columnModifier = pkConstraint.getColumnModifier(columnName);
        }
        PColumn column = new PColumnImpl(new PNameImpl(columnName), familyName,
                def.getDataType(), def.getMaxLength(), def.getScale(), def.isNull(),
                position, columnModifier);
        return column;
    } catch (IllegalArgumentException e) { // Based on precondition check in constructor
        throw new SQLException(e);
    }
}
/**
 * Group files with similar min timestamp into buckets. Files with recent min timestamps are
 * grouped together into buckets designated to short timespans, while files with older timestamps
 * are grouped into buckets representing longer timespans.
 *
 * @param files pairs consisting of a file and its min timestamp
 * @param timeUnit duration of the base (most recent) time window
 * @param base multiplier by which each successively older window grows
 * @param now the timestamp to treat as the current time
 * @return a list of buckets of files. The list is ordered such that the files with the newest
 *     timestamps come first. Each bucket is also a list of files ordered from newest to oldest.
 */
@VisibleForTesting
static <T> List<List<T>> getBuckets(Collection<Pair<T, Long>> files, long timeUnit, int base, long now) {
    // Sort files by age, newest first.
    final List<Pair<T, Long>> sortedFiles = Lists.newArrayList(files);
    Collections.sort(sortedFiles, Collections.reverseOrder(new Comparator<Pair<T, Long>>() {
        public int compare(Pair<T, Long> p1, Pair<T, Long> p2) {
            return p1.right.compareTo(p2.right);
        }
    }));

    List<List<T>> buckets = Lists.newArrayList();
    Target target = getInitialTarget(now, timeUnit);
    PeekingIterator<Pair<T, Long>> it = Iterators.peekingIterator(sortedFiles.iterator());

    outerLoop:
    while (it.hasNext()) {
        while (!target.onTarget(it.peek().right)) {
            if (target.compareToTimestamp(it.peek().right) < 0) {
                // The file is too new for the target: skip it.
                it.next();
                if (!it.hasNext())
                    break outerLoop;
            } else {
                // The file is too old for the target: switch targets.
                target = target.nextTarget(base);
            }
        }

        List<T> bucket = Lists.newArrayList();
        while (target.onTarget(it.peek().right)) {
            bucket.add(it.next().left);
            if (!it.hasNext())
                break;
        }
        buckets.add(bucket);
    }
    return buckets;
}
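// The bucketing loop relies on Guava's PeekingIterator to look at the next file's
// timestamp without consuming it. A minimal demonstration of that contract:
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.Arrays;

class PeekDemo {
    public static void main(String[] args) {
        PeekingIterator<Integer> it = Iterators.peekingIterator(Arrays.asList(3, 2, 1).iterator());
        System.out.println(it.peek()); // 3 -- inspects without advancing
        System.out.println(it.next()); // 3 -- now consumes it
        System.out.println(it.peek()); // 2
    }
}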
@Override
public Iterable<DataKey> getSubKeys() {
    final Tag tag = this.findLastTag(this.path, false);
    if (!(tag instanceof CompoundTag)) {
        return Collections.<DataKey>emptyList();
    }
    final List<DataKey> subKeys = Lists.newArrayList();
    for (final String name : ((CompoundTag) tag).getValue().keySet()) {
        subKeys.add(new NBTKey(this.createRelativeKey(name)));
    }
    return subKeys;
}
private List<SSTableReader> getNextNonExpiredSSTables(
        Iterable<SSTableReader> nonExpiringSSTables, final int gcBefore) {
    int base = cfs.getMinimumCompactionThreshold();
    long now = getNow();
    List<SSTableReader> mostInteresting = getCompactionCandidates(nonExpiringSSTables, now, base);
    if (mostInteresting != null) {
        return mostInteresting;
    }

    // If there is no sstable to compact in the standard way, try compacting a single
    // sstable whose droppable tombstone ratio is greater than the threshold.
    List<SSTableReader> sstablesWithTombstones = Lists.newArrayList();
    for (SSTableReader sstable : nonExpiringSSTables) {
        if (worthDroppingTombstones(sstable, gcBefore))
            sstablesWithTombstones.add(sstable);
    }
    if (sstablesWithTombstones.isEmpty())
        return Collections.emptyList();

    return Collections.singletonList(
            Collections.min(sstablesWithTombstones, new SSTableReader.SizeComparator()));
}
/**
 * Creates a {@link ProcessingResult} with the provided <code>attributes</code>. Assigns unique
 * document identifiers if documents are present in the <code>attributes</code> map (under the
 * key {@link AttributeNames#DOCUMENTS}).
 */
@SuppressWarnings("unchecked")
ProcessingResult(Map<String, Object> attributes) {
    this.attributes = attributes;

    // Replace a modifiable collection of documents with an unmodifiable one
    final List<Document> documents = (List<Document>) attributes.get(AttributeNames.DOCUMENTS);
    if (documents != null) {
        Document.assignDocumentIds(documents);
        attributes.put(AttributeNames.DOCUMENTS, Collections.unmodifiableList(documents));
    }

    // Replace a modifiable collection of clusters with an unmodifiable one
    final List<Cluster> clusters = (List<Cluster>) attributes.get(AttributeNames.CLUSTERS);
    if (clusters != null) {
        Cluster.assignClusterIds(clusters);
        attributes.put(AttributeNames.CLUSTERS, Collections.unmodifiableList(clusters));
    }

    // Store a reference to attributes as an unmodifiable map
    this.attributesView = Collections.unmodifiableMap(attributes);
}
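// The wrappers used above are views, not copies: later changes to the backing list
// remain visible through Collections.unmodifiableList, while mutation via the view
// itself throws. A small self-contained check:
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class UnmodifiableViewDemo {
    public static void main(String[] args) {
        List<String> backing = new ArrayList<>(Arrays.asList("a"));
        List<String> view = Collections.unmodifiableList(backing);
        backing.add("b");
        System.out.println(view); // [a, b] -- the view tracks the backing list
        view.add("c");            // throws UnsupportedOperationException
    }
}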
@NotNull
@Override
public List<Instruction> getReversedInstructions() {
    LinkedHashSet<Instruction> traversedInstructions = Sets.newLinkedHashSet();
    PseudocodeTraverserKt.traverseFollowingInstructions(
            sinkInstruction, traversedInstructions, BACKWARD, null);
    if (traversedInstructions.size() < instructions.size()) {
        // Some instructions are unreachable from the sink; traverse from them as well,
        // in reversed order, so that every instruction ends up in the result.
        List<Instruction> simplyReversedInstructions = Lists.newArrayList(instructions);
        Collections.reverse(simplyReversedInstructions);
        for (Instruction instruction : simplyReversedInstructions) {
            if (!traversedInstructions.contains(instruction)) {
                PseudocodeTraverserKt.traverseFollowingInstructions(
                        instruction, traversedInstructions, BACKWARD, null);
            }
        }
    }
    return Lists.newArrayList(traversedInstructions);
}
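// The traversal above relies on LinkedHashSet recording insertion order while
// dropping duplicates, which is what makes the final list deterministic. For example:
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

class InsertionOrderDemo {
    public static void main(String[] args) {
        Set<Integer> seen = new LinkedHashSet<>(Arrays.asList(3, 1, 3, 2));
        System.out.println(seen); // [3, 1, 2] -- first-insertion order, no duplicates
    }
}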
/**
 * @param buckets list of buckets, sorted from newest to oldest, from which to return the newest
 *     bucket within thresholds.
 * @param minThreshold minimum number of sstables in a bucket to qualify.
 * @param maxThreshold maximum number of sstables to compact at once (the returned bucket will be
 *     trimmed down to this).
 * @param now the timestamp to treat as the current time.
 * @param baseTime duration of the "incoming window", the most recent time span.
 * @return a bucket (list) of sstables to compact.
 */
@VisibleForTesting
static List<SSTableReader> newestBucket(
        List<List<SSTableReader>> buckets, int minThreshold, int maxThreshold, long now, long baseTime) {
    // If the "incoming window" has at least minThreshold SSTables, choose that one.
    // For any other bucket, at least 2 SSTables is enough.
    // In any case, limit to maxThreshold SSTables.
    Target incomingWindow = getInitialTarget(now, baseTime);
    for (List<SSTableReader> bucket : buckets) {
        if (bucket.size() >= minThreshold
                || (bucket.size() >= 2 && !incomingWindow.onTarget(bucket.get(0).getMinTimestamp())))
            return trimToThreshold(bucket, maxThreshold);
    }
    return Collections.emptyList();
}
/** * @param gcBefore * @return */ private List<SSTableReader> getNextBackgroundSSTables(final int gcBefore) { if (!isEnabled() || cfs.getSSTables().isEmpty()) return Collections.emptyList(); Set<SSTableReader> uncompacting = Sets.intersection(sstables, cfs.getUncompactingSSTables()); // Find fully expired SSTables. Those will be included no matter what. Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables( cfs, uncompacting, cfs.getOverlappingSSTables(uncompacting), gcBefore); Set<SSTableReader> candidates = Sets.newHashSet(filterSuspectSSTables(uncompacting)); List<SSTableReader> compactionCandidates = new ArrayList<>(getNextNonExpiredSSTables(Sets.difference(candidates, expired), gcBefore)); if (!expired.isEmpty()) { logger.debug("Including expired sstables: {}", expired); compactionCandidates.addAll(expired); } return compactionCandidates; }
/**
 * Processes the data in the Multimap (key:value -> 1:n, here aitID:threadInfo), extracts a
 * <key, n> entry for each key where n is the number of values mapped to it, and sorts the
 * entries by n in descending order.
 *
 * @return the List of <key, count> entries, sorted by count from largest to smallest
 */
public List<Map.Entry<String, Integer>> getOrderList(Multimap<String, ThreadInfo> w_IdMap) {
    Set<String> keys = w_IdMap.keySet();
    Map<String, Integer> w_IdMappingThread = Maps.newHashMap();
    for (String key : keys) {
        Collection<ThreadInfo> values = w_IdMap.get(key);
        w_IdMappingThread.put(key, values.size());
    }
    List<Map.Entry<String, Integer>> orderList =
            new ArrayList<Map.Entry<String, Integer>>(w_IdMappingThread.entrySet());
    Collections.sort(orderList, new Comparator<Map.Entry<String, Integer>>() {
        @Override
        public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
            return o2.getValue().compareTo(o1.getValue());
        }
    });
    return orderList;
}
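// Since Java 8 the count-then-sort-descending step above can be expressed with
// streams. A hedged equivalent, generic in the value type, with Guava's Multimap
// as in the original:
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class OrderListStreams {
    static <V> List<Map.Entry<String, Integer>> orderList(Multimap<String, V> map) {
        return map.keySet().stream()
                .map(key -> Maps.immutableEntry(key, map.get(key).size()))
                .sorted(Map.Entry.<String, Integer>comparingByValue(Comparator.reverseOrder()))
                .collect(Collectors.toList());
    }
}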
private void writePropertyMethods(
        ClassVisitor visitor, Type generatedType, StructSchema<?> viewSchema, StructSchema<?> delegateSchema) {
    Collection<String> delegatePropertyNames;
    if (delegateSchema != null) {
        delegatePropertyNames = delegateSchema.getPropertyNames();
    } else {
        delegatePropertyNames = Collections.emptySet();
    }
    Class<?> viewClass = viewSchema.getType().getConcreteClass();
    for (ModelProperty<?> property : viewSchema.getProperties()) {
        String propertyName = property.getName();

        writeConfigureMethod(visitor, generatedType, property);
        writeSetMethod(visitor, generatedType, property);
        createTypeConvertingSetter(visitor, generatedType, property);

        // Delegated properties are handled in writeDelegateMethods()
        if (delegatePropertyNames.contains(propertyName)) {
            continue;
        }
        switch (property.getStateManagementType()) {
            case MANAGED:
                writeGetters(visitor, generatedType, property);
                writeSetter(visitor, generatedType, property);
                break;
            case UNMANAGED:
                for (WeaklyTypeReferencingMethod<?, ?> getter : property.getGetters()) {
                    Method getterMethod = getter.getMethod();
                    if (!Modifier.isFinal(getterMethod.getModifiers()) && !propertyName.equals("metaClass")) {
                        writeNonAbstractMethodWrapper(visitor, generatedType, viewClass, getterMethod);
                    }
                }
                break;
        }
    }
}
@Override
public Map<String, Object> getValuesDeep() {
    final Tag tag = this.findLastTag(this.path, false);
    if (!(tag instanceof CompoundTag)) {
        return Collections.emptyMap();
    }
    // Breadth-first walk over nested compound tags, flattening values into dotted keys.
    final Queue<Node> node = new ArrayDeque<Node>(ImmutableList.of(new Node(tag)));
    final Map<String, Object> values = Maps.newHashMap();
    while (!node.isEmpty()) {
        final Node root = node.poll();
        for (final Map.Entry<String, Tag> entry : root.values.entrySet()) {
            final String key = this.createRelativeKey(root.parent, entry.getKey());
            if (entry.getValue() instanceof CompoundTag) {
                node.add(new Node(key, entry.getValue()));
            } else {
                values.put(key, entry.getValue().getValue());
            }
        }
    }
    return values;
}
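// The method above flattens a tree of compound tags into dotted keys via a queue.
// The same idea with plain nested Maps (a hypothetical stand-in for the NBT types),
// written recursively:
import java.util.LinkedHashMap;
import java.util.Map;

class FlattenDemo {
    @SuppressWarnings("unchecked")
    static Map<String, Object> flatten(String prefix, Map<String, Object> tree) {
        Map<String, Object> out = new LinkedHashMap<>();
        for (Map.Entry<String, Object> e : tree.entrySet()) {
            String key = prefix.isEmpty() ? e.getKey() : prefix + "." + e.getKey();
            if (e.getValue() instanceof Map) {
                out.putAll(flatten(key, (Map<String, Object>) e.getValue())); // recurse into sub-map
            } else {
                out.put(key, e.getValue()); // leaf value keeps its dotted path
            }
        }
        return out;
    }
}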
/**
 * Iterates over the Tokens in the given ring, starting with the token for the node owning start
 * (which does not have to be a Token in the ring).
 *
 * @param includeMin True if the minimum token should be returned in the ring even if it has no
 *     owner.
 */
public static Iterator<Token> ringIterator(final List<Token> ring, Token start, boolean includeMin) {
    assert ring.size() > 0;
    // Insert the minimum token (at index == -1) if we were asked to include it
    // and it isn't a member of the ring.
    final boolean insertMin =
            includeMin && !ring.get(0).equals(StorageService.getPartitioner().getMinimumToken());

    int i = Collections.binarySearch(ring, start);
    if (i < 0) {
        i = (i + 1) * (-1);
        if (i >= ring.size())
            i = insertMin ? -1 : 0;
    }
    final int startIndex = i;

    return new AbstractIterator<Token>() {
        int j = startIndex;

        protected Token computeNext() {
            if (j < -1)
                return endOfData();
            try {
                // Return the minimum token for index == -1, the ring token otherwise.
                return j == -1 ? StorageService.getPartitioner().getMinimumToken() : ring.get(j);
            } finally {
                j++;
                if (j == ring.size())
                    j = insertMin ? -1 : 0;
                if (j == startIndex) // end iteration
                    j = -2;
            }
        }
    };
}
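// Guava's AbstractIterator, used above, drives iteration through computeNext():
// return the next element, or call endOfData() to finish. A minimal countdown example:
import com.google.common.collect.AbstractIterator;

class Countdown extends AbstractIterator<Integer> {
    private int n = 3;

    @Override
    protected Integer computeNext() {
        return n > 0 ? n-- : endOfData(); // yields 3, 2, 1, then stops
    }
}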
/** Transfers document and cluster lists to the attributes map after deserialization. */
@Commit
private void afterDeserialization() throws Exception {
    if (otherAttributesForSerialization != null) {
        attributes = SimpleXmlWrappers.unwrap(otherAttributesForSerialization);
    }
    attributesView = Collections.unmodifiableMap(attributes);

    attributes.put(AttributeNames.QUERY, query != null ? query.trim() : null);
    attributes.put(AttributeNames.DOCUMENTS, documents);
    attributes.put(AttributeNames.CLUSTERS, clusters);

    // Convert document ids to the actual references
    if (clusters != null && documents != null) {
        final Map<String, Document> documentsById = Maps.newHashMap();
        for (Document document : documents) {
            documentsById.put(document.getStringId(), document);
        }
        for (Cluster cluster : clusters) {
            documentIdToReference(cluster, documentsById);
        }
    }
}
private Set<PseudoValue> getMergedValues(@NotNull PseudoValue value) {
    Set<PseudoValue> result = mergedValues.get(value);
    return result != null ? result : Collections.<PseudoValue>emptySet();
}

@NotNull
@Override
public List<? extends Instruction> getUsages(@Nullable PseudoValue value) {
    List<? extends Instruction> result = valueUsages.get(value);
    return result != null ? result : Collections.<Instruction>emptyList();
}

@NotNull
@Override
public List<? extends KtElement> getValueElements(@Nullable PseudoValue value) {
    List<? extends KtElement> result = elementsToValues.getKeysByValue(value);
    return result != null ? result : Collections.<KtElement>emptyList();
}
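// The "result != null ? result : empty" pattern in the three accessors above is what
// Map.getOrDefault (Java 8+) expresses directly -- a sketch, assuming mergedValues
// is a java.util.Map:
//
//     Set<PseudoValue> merged = mergedValues.getOrDefault(value, Collections.<PseudoValue>emptySet());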
public MutationState createTable(CreateTableStatement statement, byte[][] splits) throws SQLException {
    PTableType tableType = statement.getTableType();
    boolean isView = tableType == PTableType.VIEW;
    if (isView && !statement.getProps().isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
                .build()
                .buildException();
    }
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();

        PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
        String pkName = null;
        Set<String> pkColumns = Collections.<String>emptySet();
        Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
        if (pkConstraint != null) {
            pkColumns = pkConstraint.getColumnNames();
            pkColumnsIterator = pkColumns.iterator();
            pkName = pkConstraint.getName();
        }

        List<ColumnDef> colDefs = statement.getColumnDefs();
        List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
        PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
        int columnOrdinal = 0;
        Map<String, PName> familyNames = Maps.newLinkedHashMap();
        boolean isPK = false;
        for (ColumnDef colDef : colDefs) {
            if (colDef.isPK()) {
                if (isPK) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                            .setColumnName(colDef.getColumnDefName().getColumnName().getName())
                            .build()
                            .buildException();
                }
                isPK = true;
            }
            PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
            if (SchemaUtil.isPKColumn(column)) {
                // TODO: remove this constraint?
                if (!pkColumns.isEmpty()
                        && !column.getName().getString().equals(pkColumnsIterator.next())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
                            .setSchemaName(schemaName)
                            .setTableName(tableName)
                            .setColumnName(column.getName().getString())
                            .build()
                            .buildException();
                }
            }
            columns.add(column);
            if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
                        .setSchemaName(schemaName)
                        .setTableName(tableName)
                        .setColumnName(column.getName().getString())
                        .build()
                        .buildException();
            }
            if (column.getFamilyName() != null) {
                familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
            }
        }
        if (!isPK && pkColumns.isEmpty()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
                    .setSchemaName(schemaName)
                    .setTableName(tableName)
                    .build()
                    .buildException();
        }

        List<Pair<byte[], Map<String, Object>>> familyPropList =
                Lists.newArrayListWithExpectedSize(familyNames.size());
        Map<String, Object> commonFamilyProps = Collections.emptyMap();
        Map<String, Object> tableProps = Collections.emptyMap();
        if (!statement.getProps().isEmpty()) {
            if (statement.isView()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
                        .build()
                        .buildException();
            }
            for (String familyName : statement.getProps().keySet()) {
                if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
                    if (familyNames.get(familyName) == null) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                                .setFamilyName(familyName)
                                .build()
                                .buildException();
                    }
                }
            }
            commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
            tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

            Collection<Pair<String, Object>> props =
                    statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
            // Somewhat hacky way of determining if a property is for the HColumnDescriptor
            // or the HTableDescriptor.
            HColumnDescriptor defaultDescriptor =
                    new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
            for (Pair<String, Object> prop : props) {
                if (defaultDescriptor.getValue(prop.getFirst()) != null) {
                    commonFamilyProps.put(prop.getFirst(), prop.getSecond());
                } else {
                    tableProps.put(prop.getFirst(), prop.getSecond());
                }
            }
        }

        for (PName familyName : familyNames.values()) {
            Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
            if (props.isEmpty()) {
                familyPropList.add(
                        new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
            } else {
                Map<String, Object> combinedFamilyProps =
                        Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
                combinedFamilyProps.putAll(commonFamilyProps);
                for (Pair<String, Object> prop : props) {
                    combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
                }
                familyPropList.add(
                        new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
            }
        }

        // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
        if (tableType == PTableType.SYSTEM) {
            PTable table = new PTableImpl(new PNameImpl(tableName), tableType,
                    MetaDataProtocol.MIN_TABLE_TIMESTAMP, 0, QueryConstants.SYSTEM_TABLE_PK_NAME,
                    null, columns);
            connection.addTable(schemaName, table);
        }

        for (PColumn column : columns) {
            addColumnMutation(schemaName, tableName, column, colUpsert);
        }

        Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
        if (saltBucketNum != null
                && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
                    .build()
                    .buildException();
        }

        PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, tableType.getSerializedValue());
        tableUpsert.setInt(4, 0);
        tableUpsert.setInt(5, columnOrdinal);
        if (saltBucketNum != null) {
            tableUpsert.setInt(6, saltBucketNum);
        } else {
            tableUpsert.setNull(6, Types.INTEGER);
        }
        tableUpsert.setString(7, pkName);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();

        MetaDataMutationResult result = connection.getQueryServices()
                .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
        MutationCode code = result.getMutationCode();
        switch (code) {
            case TABLE_ALREADY_EXISTS:
                connection.addTable(schemaName, result.getTable());
                if (!statement.ifNotExists()) {
                    throw new TableAlreadyExistsException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                // TODO: add table if in result?
                throw new NewerTableAlreadyExistsException(schemaName, tableName);
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                        .setSchemaName(schemaName)
                        .setTableName(tableName)
                        .build()
                        .buildException();
            default:
                PTable table = new PTableImpl(new PNameImpl(tableName), tableType,
                        result.getMutationTime(), 0, pkName, saltBucketNum, columns);
                connection.addTable(schemaName, table);
                if (tableType == PTableType.USER) {
                    connection.setAutoCommit(true);
                    // Delete everything in the column. You'll still be able to do queries
                    // at earlier timestamps.
                    Long scn = connection.getSCN();
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PSchema schema = new PSchemaImpl(schemaName,
                            ImmutableMap.<String, PTable>of(table.getName().getString(), table));
                    TableRef tableRef = new TableRef(null, table, schema, ts);
                    byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
                    MutationPlan plan = new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
                    return connection.getQueryServices().updateData(plan);
                }
                break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
public MutationState dropTable(DropTableStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        byte[] key = SchemaUtil.getTableKey(schemaName, tableName);
        Long scn = connection.getSCN();
        // FIXME: Remove the deprecation suppression when the unintentionally deprecated
        // method is fixed (HBASE-7870). The version of the Delete constructor without the
        // lock args was introduced in 0.94.4, so if we used it here we could no longer
        // use the 0.94.2 version of the client.
        @SuppressWarnings("deprecation")
        List<Mutation> tableMetaData = Collections.<Mutation>singletonList(
                new Delete(key, scn == null ? HConstants.LATEST_TIMESTAMP : scn, null));
        MetaDataMutationResult result =
                connection.getQueryServices().dropTable(tableMetaData, statement.isView());
        MutationCode code = result.getMutationCode();
        switch (code) {
            case TABLE_NOT_FOUND:
                if (!statement.ifExists()) {
                    throw new TableNotFoundException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                throw new NewerTableAlreadyExistsException(schemaName, tableName);
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                        .setSchemaName(schemaName)
                        .setTableName(tableName)
                        .build()
                        .buildException();
            default:
                try {
                    connection.removeTable(schemaName, tableName);
                } catch (TableNotFoundException e) {
                    // Ignore - just means it wasn't cached
                }
                if (!statement.isView()) {
                    connection.setAutoCommit(true);
                    // Delete everything in the column. You'll still be able to do queries
                    // at earlier timestamps.
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    // Create empty table and schema - they're only used to get the name from:
                    // PName name, PTableType type, long timeStamp, long sequenceNumber,
                    // List<PColumn> columns
                    PTable table = result.getTable();
                    PSchema schema = new PSchemaImpl(schemaName,
                            ImmutableMap.<String, PTable>of(table.getName().getString(), table));
                    TableRef tableRef = new TableRef(null, table, schema, ts);
                    MutationPlan plan = new PostDDLCompiler(connection)
                            .compile(tableRef, null, Collections.<PColumn>emptyList(), ts);
                    return connection.getQueryServices().updateData(plan);
                }
                break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        PTable table = getLatestTable(schemaName, tableName); // TODO: Do in resolver?
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            ColumnRef columnRef = null;
            try {
                columnRef = resolver.resolveColumn((ColumnParseNode) statement.getColumnRef());
            } catch (ColumnNotFoundException e) {
                if (statement.ifExists()) {
                    return new MutationState(0, connection);
                }
                throw e;
            }
            TableRef tableRef = columnRef.getTableRef();
            PColumn columnToDrop = columnRef.getColumn();
            if (SchemaUtil.isPKColumn(columnToDrop)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK)
                        .setColumnName(columnToDrop.getName().getString())
                        .build()
                        .buildException();
            }
            int columnCount = table.getColumns().size() - 1;
            String familyName = null;
            List<String> binds = Lists.newArrayListWithExpectedSize(4);
            StringBuilder buf = new StringBuilder("DELETE FROM " + TYPE_SCHEMA + ".\"" + TYPE_TABLE
                    + "\" WHERE " + TABLE_SCHEM_NAME);
            if (schemaName == null || schemaName.length() == 0) {
                buf.append(" IS NULL AND ");
            } else {
                buf.append(" = ? AND ");
                binds.add(schemaName);
            }
            buf.append(TABLE_NAME_NAME + " = ? AND " + COLUMN_NAME + " = ? AND " + TABLE_CAT_NAME);
            binds.add(tableName);
            binds.add(columnToDrop.getName().getString());
            if (columnToDrop.getFamilyName() == null) {
                buf.append(" IS NULL");
            } else {
                buf.append(" = ?");
                binds.add(familyName = columnToDrop.getFamilyName().getString());
            }

            PreparedStatement colDelete = connection.prepareStatement(buf.toString());
            for (int i = 0; i < binds.size(); i++) {
                colDelete.setString(i + 1, binds.get(i));
            }
            colDelete.execute();

            PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION);
            colUpdate.setString(1, schemaName);
            colUpdate.setString(2, tableName);
            for (int i = columnToDrop.getPosition() + 1; i < table.getColumns().size(); i++) {
                PColumn column = table.getColumns().get(i);
                colUpdate.setString(3, column.getName().getString());
                colUpdate.setString(4,
                        column.getFamilyName() == null ? null : column.getFamilyName().getString());
                colUpdate.setInt(5, i);
                colUpdate.execute();
            }

            final long seqNum = table.getSequenceNumber() + 1;
            PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
            tableUpsert.setString(1, schemaName);
            tableUpsert.setString(2, tableName);
            tableUpsert.setString(3, table.getType().getSerializedValue());
            tableUpsert.setLong(4, seqNum);
            tableUpsert.setInt(5, columnCount);
            tableUpsert.execute();

            final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
            connection.rollback();

            // If we're dropping the last KV column, we have to pass an indication along to
            // the dropColumn call so it can populate a new empty KV column.
            byte[] emptyCF = null;
            if (table.getType() != PTableType.VIEW
                    && !SchemaUtil.isPKColumn(columnToDrop)
                    && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName())
                    && table.getColumnFamilies().get(0).getColumns().size() == 1) {
                emptyCF = SchemaUtil.getEmptyColumnFamily(
                        table.getColumnFamilies().subList(1, table.getColumnFamilies().size()));
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData,
                    emptyCF != null && Bytes.compareTo(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES) == 0
                            ? emptyCF
                            : null);
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    connection.addTable(schemaName, result.getTable());
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(
                                schemaName, tableName, familyName, columnToDrop.getName().getString());
                    }
                    return new MutationState(0, connection);
                }
                connection.removeColumn(schemaName, tableName, familyName,
                        columnToDrop.getName().getString(), seqNum, result.getMutationTime());
                // If we have a VIEW, then only delete the metadata and leave the table data alone.
                if (table.getType() != PTableType.VIEW) {
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries
                    // at earlier timestamps.
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    MutationPlan plan = new PostDDLCompiler(connection)
                            .compile(tableRef, emptyCF, Collections.singletonList(columnToDrop), ts);
                    return connection.getQueryServices().updateData(plan);
                }
                return new MutationState(0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}