@NotNull
private Collection<PsiElement> getDeclarationsByDescriptor(@NotNull DeclarationDescriptor declarationDescriptor) {
    Collection<PsiElement> declarations;
    if (declarationDescriptor instanceof PackageViewDescriptor) {
        final PackageViewDescriptor aPackage = (PackageViewDescriptor) declarationDescriptor;
        Collection<JetFile> files = trace.get(BindingContext.PACKAGE_TO_FILES, aPackage.getFqName());
        if (files == null) {
            // The package can be defined outside of Kotlin sources, e.g. in a library or in Java code.
            return Collections.emptyList();
        }
        declarations = Collections2.transform(files, new Function<JetFile, PsiElement>() {
            @Override
            public PsiElement apply(@Nullable JetFile file) {
                assert file != null : "File is null for package " + aPackage;
                return file.getPackageDirective().getNameIdentifier();
            }
        });
    }
    else {
        declarations = Collections.singletonList(
                BindingContextUtils.descriptorToDeclaration(trace.getBindingContext(), declarationDescriptor));
    }
    return declarations;
}
private List<String> getCurrentServerIds(boolean nag, boolean lagged) {
    try (Jedis jedis = pool.getResource()) {
        long time = getRedisTime(jedis.time());
        int nagTime = 0;
        if (nag) {
            nagTime = nagAboutServers.decrementAndGet();
            if (nagTime <= 0) {
                nagAboutServers.set(10);
            }
        }
        ImmutableList.Builder<String> servers = ImmutableList.builder();
        Map<String, String> heartbeats = jedis.hgetAll("heartbeats");
        for (Map.Entry<String, String> entry : heartbeats.entrySet()) {
            try {
                long stamp = Long.parseLong(entry.getValue());
                if (lagged ? time >= stamp + 30 : time <= stamp + 30) {
                    servers.add(entry.getKey());
                } else if (nag && nagTime <= 0) {
                    getLogger().severe(entry.getKey() + " is " + (time - stamp)
                            + " seconds behind! (Time not synchronized or server down?)");
                }
            } catch (NumberFormatException ignored) {
            }
        }
        return servers.build();
    } catch (JedisConnectionException e) {
        getLogger().log(Level.SEVERE, "Unable to fetch server IDs", e);
        return Collections.singletonList(configuration.getServerId());
    }
}
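// The ternary above ("lagged ? time >= stamp + 30 : time <= stamp + 30") packs two predicates
// into one expression. Below is a minimal, self-contained sketch of that 30-second heartbeat
// window, spelled out as two named checks; the class and method names are hypothetical and are
// not part of the surrounding code.
final class HeartbeatWindowSketch {
    private static final long MAX_LAG_SECONDS = 30;

    // A server counts as alive while its heartbeat is at most 30 seconds old.
    static boolean isAlive(long nowSeconds, long heartbeatSeconds) {
        return nowSeconds <= heartbeatSeconds + MAX_LAG_SECONDS;
    }

    // A server counts as lagged (or down) once its heartbeat is 30 or more seconds old.
    static boolean isLagged(long nowSeconds, long heartbeatSeconds) {
        return nowSeconds >= heartbeatSeconds + MAX_LAG_SECONDS;
    }

    public static void main(String[] args) {
        long now = 1_000L;
        System.out.println(isAlive(now, 980L));   // true: only 20 seconds behind
        System.out.println(isLagged(now, 960L));  // true: 40 seconds behind
    }
}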
private List<SSTableReader> getNextNonExpiredSSTables(Iterable<SSTableReader> nonExpiringSSTables, final int gcBefore) {
    int base = cfs.getMinimumCompactionThreshold();
    long now = getNow();
    List<SSTableReader> mostInteresting = getCompactionCandidates(nonExpiringSSTables, now, base);
    if (mostInteresting != null) {
        return mostInteresting;
    }

    // If there is no sstable to compact in the standard way, try compacting the single sstable
    // whose droppable tombstone ratio is greater than the threshold.
    List<SSTableReader> sstablesWithTombstones = Lists.newArrayList();
    for (SSTableReader sstable : nonExpiringSSTables) {
        if (worthDroppingTombstones(sstable, gcBefore)) {
            sstablesWithTombstones.add(sstable);
        }
    }
    if (sstablesWithTombstones.isEmpty()) {
        return Collections.emptyList();
    }

    return Collections.singletonList(
            Collections.min(sstablesWithTombstones, new SSTableReader.SizeComparator()));
}
public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        PTable table = getLatestTable(schemaName, tableName); // TODO: Do in resolver?
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            ColumnRef columnRef = null;
            try {
                columnRef = resolver.resolveColumn((ColumnParseNode) statement.getColumnRef());
            } catch (ColumnNotFoundException e) {
                if (statement.ifExists()) {
                    return new MutationState(0, connection);
                }
                throw e;
            }
            TableRef tableRef = columnRef.getTableRef();
            PColumn columnToDrop = columnRef.getColumn();
            if (SchemaUtil.isPKColumn(columnToDrop)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK)
                        .setColumnName(columnToDrop.getName().getString())
                        .build()
                        .buildException();
            }
            int columnCount = table.getColumns().size() - 1;
            String familyName = null;
            List<String> binds = Lists.newArrayListWithExpectedSize(4);
            StringBuilder buf = new StringBuilder(
                    "DELETE FROM " + TYPE_SCHEMA + ".\"" + TYPE_TABLE + "\" WHERE " + TABLE_SCHEM_NAME);
            if (schemaName == null || schemaName.length() == 0) {
                buf.append(" IS NULL AND ");
            } else {
                buf.append(" = ? AND ");
                binds.add(schemaName);
            }
            buf.append(TABLE_NAME_NAME + " = ? AND " + COLUMN_NAME + " = ? AND " + TABLE_CAT_NAME);
            binds.add(tableName);
            binds.add(columnToDrop.getName().getString());
            if (columnToDrop.getFamilyName() == null) {
                buf.append(" IS NULL");
            } else {
                buf.append(" = ?");
                binds.add(familyName = columnToDrop.getFamilyName().getString());
            }

            PreparedStatement colDelete = connection.prepareStatement(buf.toString());
            for (int i = 0; i < binds.size(); i++) {
                colDelete.setString(i + 1, binds.get(i));
            }
            colDelete.execute();

            PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION);
            colUpdate.setString(1, schemaName);
            colUpdate.setString(2, tableName);
            for (int i = columnToDrop.getPosition() + 1; i < table.getColumns().size(); i++) {
                PColumn column = table.getColumns().get(i);
                colUpdate.setString(3, column.getName().getString());
                colUpdate.setString(4, column.getFamilyName() == null ? null : column.getFamilyName().getString());
                colUpdate.setInt(5, i);
                colUpdate.execute();
            }

            final long seqNum = table.getSequenceNumber() + 1;
            PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
            tableUpsert.setString(1, schemaName);
            tableUpsert.setString(2, tableName);
            tableUpsert.setString(3, table.getType().getSerializedValue());
            tableUpsert.setLong(4, seqNum);
            tableUpsert.setInt(5, columnCount);
            tableUpsert.execute();

            final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
            connection.rollback();

            // If we're dropping the last KV column, we have to pass an indication along to the
            // dropColumn call to populate a new empty KV column.
            byte[] emptyCF = null;
            if (table.getType() != PTableType.VIEW
                    && !SchemaUtil.isPKColumn(columnToDrop)
                    && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName())
                    && table.getColumnFamilies().get(0).getColumns().size() == 1) {
                emptyCF = SchemaUtil.getEmptyColumnFamily(
                        table.getColumnFamilies().subList(1, table.getColumnFamilies().size()));
            }
            MetaDataMutationResult result = connection
                    .getQueryServices()
                    .dropColumn(
                            tableMetaData,
                            emptyCF != null && Bytes.compareTo(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES) == 0
                                    ? emptyCF
                                    : null);
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    connection.addTable(schemaName, result.getTable());
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(
                                schemaName, tableName, familyName, columnToDrop.getName().getString());
                    }
                    return new MutationState(0, connection);
                }
                connection.removeColumn(
                        schemaName,
                        tableName,
                        familyName,
                        columnToDrop.getName().getString(),
                        seqNum,
                        result.getMutationTime());
                // If we have a VIEW, then only delete the metadata, and leave the table data alone.
                if (table.getType() != PTableType.VIEW) {
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps.
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    MutationPlan plan = new PostDDLCompiler(connection)
                            .compile(tableRef, emptyCF, Collections.singletonList(columnToDrop), ts);
                    return connection.getQueryServices().updateData(plan);
                }
                return new MutationState(0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
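// dropColumn above retries its whole loop exactly once when a ConcurrentTableMutationException
// signals that another client changed the table metadata concurrently. The following is a
// minimal, self-contained sketch of that retry-once pattern in isolation; the names are
// hypothetical, and java.util.ConcurrentModificationException merely stands in for the
// Phoenix-specific exception so the sketch compiles on its own.
final class RetryOnceSketch {
    static <T> T run(java.util.function.Supplier<T> attempt, Runnable refreshMetadata) {
        boolean retried = false;
        while (true) {
            try {
                return attempt.get();
            } catch (java.util.ConcurrentModificationException e) {
                if (retried) {
                    throw e;            // second failure: give up, as dropColumn does
                }
                refreshMetadata.run();  // re-read the latest table definition before retrying
                retried = true;
            }
        }
    }
}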
public MutationState dropTable(DropTableStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        byte[] key = SchemaUtil.getTableKey(schemaName, tableName);
        Long scn = connection.getSCN();
        @SuppressWarnings("deprecation")
        // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        List<Mutation> tableMetaData = Collections.<Mutation>singletonList(
                new Delete(key, scn == null ? HConstants.LATEST_TIMESTAMP : scn, null));
        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, statement.isView());
        MutationCode code = result.getMutationCode();
        switch (code) {
            case TABLE_NOT_FOUND:
                if (!statement.ifExists()) {
                    throw new TableNotFoundException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                throw new NewerTableAlreadyExistsException(schemaName, tableName);
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                        .setSchemaName(schemaName)
                        .setTableName(tableName)
                        .build()
                        .buildException();
            default:
                try {
                    connection.removeTable(schemaName, tableName);
                } catch (TableNotFoundException e) {
                    // Ignore - just means it wasn't cached
                }
                if (!statement.isView()) {
                    connection.setAutoCommit(true);
                    // Delete everything in the table. You'll still be able to do queries at earlier timestamps.
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    // Create empty table and schema - they're only used to get the name from:
                    // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                    PTable table = result.getTable();
                    PSchema schema = new PSchemaImpl(
                            schemaName, ImmutableMap.<String, PTable>of(table.getName().getString(), table));
                    TableRef tableRef = new TableRef(null, table, schema, ts);
                    MutationPlan plan = new PostDDLCompiler(connection)
                            .compile(tableRef, null, Collections.<PColumn>emptyList(), ts);
                    return connection.getQueryServices().updateData(plan);
                }
                break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
public List<String> getDefaultSchemaPath() {
    final String schemaName = connection.getSchema();
    return schemaName == null
            ? Collections.<String>emptyList()
            : Collections.singletonList(schemaName);
}