private long getCommitTimestampRollBackIfNecessary(
        long startTimestamp, Multimap<String, Cell> tableNameToCell) {
    Long commitTimestamp = transactionService.get(startTimestamp);
    if (commitTimestamp == null) {
        // Roll back this transaction (note that rolling back arbitrary transactions
        // can never cause correctness issues, only liveness issues).
        try {
            transactionService.putUnlessExists(startTimestamp, TransactionConstants.FAILED_COMMIT_TS);
        } catch (KeyAlreadyExistsException e) {
            String msg = "Could not roll back transaction with start timestamp " + startTimestamp + "; either"
                    + " it was already rolled back (by a different transaction), or it committed successfully"
                    + " before we could roll it back.";
            log.error(
                    "This isn't a bug, but it should be very infrequent. " + msg,
                    new TransactionFailedRetriableException(msg, e));
        }
        // Re-read: either our roll-back won, or the competing writer's value
        // (a commit or another roll-back) is now visible.
        commitTimestamp = transactionService.get(startTimestamp);
    }

    if (commitTimestamp == null) {
        throw new RuntimeException(
                "expected commit timestamp to be non-null for startTs: " + startTimestamp);
    }
    if (commitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
        // The transaction was rolled back, so delete everything it wrote.
        for (String table : tableNameToCell.keySet()) {
            Map<Cell, Long> toDelete =
                    Maps2.createConstantValueMap(tableNameToCell.get(table), startTimestamp);
            keyValueService.delete(table, Multimaps.forMap(toDelete));
        }
    }
    return commitTimestamp;
}
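// A minimal caller sketch, not part of the original class: it illustrates how the
// helper above might be used to resolve the commit state of a batch of candidate
// transactions. The method name `resolveCommitStates` and the `writesByStartTs`
// parameter (mapping each start timestamp to the cells that transaction wrote) are
// hypothetical names assumed for illustration only.
private Map<Long, Long> resolveCommitStates(Map<Long, Multimap<String, Cell>> writesByStartTs) {
    Map<Long, Long> startToCommit = Maps.newHashMap();
    for (Map.Entry<Long, Multimap<String, Cell>> entry : writesByStartTs.entrySet()) {
        long startTs = entry.getKey();
        // The helper rolls the transaction back (and deletes its writes) if it never
        // committed; otherwise it returns the real commit timestamp.
        long commitTs = getCommitTimestampRollBackIfNecessary(startTs, entry.getValue());
        if (commitTs != TransactionConstants.FAILED_COMMIT_TS) {
            // The transaction committed; readers can compare this commit timestamp
            // against their own start timestamp to decide visibility.
            startToCommit.put(startTs, commitTs);
        }
    }
    return startToCommit;
}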
static void checkAndSetReplicationFactor(
        Cassandra.Client client, KsDef ks, boolean freshInstance, int desiredRf, boolean safetyDisabled)
        throws InvalidRequestException, SchemaDisagreementException, TException {
    if (freshInstance) {
        Set<String> dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled);
        // If RF exceeds the number of hosts, then Cassandra will reject writes.
        ks.setStrategy_options(Maps2.createConstantValueMap(dcs, String.valueOf(desiredRf)));
        return;
    }

    final Set<String> dcs;
    if (CassandraConstants.SIMPLE_STRATEGY.equals(ks.getStrategy_class())) {
        int currentRF = Integer.parseInt(
                ks.getStrategy_options().get(CassandraConstants.REPLICATION_FACTOR_OPTION));
        String errorMessage = "This Cassandra cluster is running with the simple partitioning strategy. "
                + "This partitioner is not rack-aware and is not intended for production use. "
                + "This will have to be fixed by manually configuring the network partitioner "
                + "and running the appropriate repairs. "
                + "Contact the AtlasDB team to perform these steps.";
        if (currentRF != 1) {
            logErrorOrThrow(errorMessage, safetyDisabled);
        }
        // Automatically convert RF=1 to look like the network partitioner.
        dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled);
        if (dcs.size() > 1) {
            logErrorOrThrow(errorMessage, safetyDisabled);
        }
        if (!safetyDisabled) {
            ks.setStrategy_class(CassandraConstants.NETWORK_STRATEGY);
            ks.setStrategy_options(ImmutableMap.of(dcs.iterator().next(), "1"));
        }
    } else {
        dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled);
    }

    Map<String, String> strategyOptions = Maps.newHashMap(ks.getStrategy_options());
    for (String dc : dcs) {
        if (strategyOptions.get(dc) == null) {
            logErrorOrThrow(
                    "The datacenter for this Cassandra cluster is invalid."
                            + " Failed dc: " + dc
                            + "; strategyOptions: " + strategyOptions,
                    safetyDisabled);
        }
    }

    String dc = dcs.iterator().next();
    int currentRF = Integer.parseInt(strategyOptions.get(dc));
    // We need to worry about the user not running repair, and about the user skipping
    // replication levels: RF changes must step through 1 -> 2 -> 3 with repairs in between.
    if (currentRF == 1 && desiredRf == 2) {
        log.error(
                "Increasing AtlasDB replication factor from 1 to 2. Users should run "
                        + "`nodetool repair` on the cluster if they have not already!");
        strategyOptions.put(dc, String.valueOf(desiredRf));
        ks.setStrategy_options(strategyOptions);
    } else if (currentRF == 1 && desiredRf == CassandraConstants.DEFAULT_REPLICATION_FACTOR) {
        log.error(
                "Increasing AtlasDB replication factor directly from 1 to 3 is NOT allowed.\n"
                        + "Increase the replication factor to 2 first, then run `nodetool repair`. "
                        + "If that succeeds, increase the replication factor to 3 and run "
                        + "`nodetool repair` again.");
    } else if (currentRF == 2 && desiredRf == CassandraConstants.DEFAULT_REPLICATION_FACTOR) {
        strategyOptions.put(dc, String.valueOf(desiredRf));
        ks.setStrategy_options(strategyOptions);
        ks.setCf_defs(ImmutableList.<CfDef>of());
        client.system_update_keyspace(ks);
        log.warn(
                "Updating the AtlasDB replication factor from " + currentRF + " to " + desiredRf
                        + " is NOT yet complete!"
                        + " Users may want to run `nodetool repair` to make all replicas consistent.");
    } else if (currentRF > desiredRf) {
        // We are moving to a lower RF; this should always be safe from a consistency
        // rule standpoint.
        log.error(
                "Reducing AtlasDB replication factor from " + currentRF + " to " + desiredRf + ". "
                        + "Users may want to run `nodetool cleanup` to remove excess replication.");
        strategyOptions.put(dc, String.valueOf(desiredRf));
        ks.setStrategy_options(strategyOptions);
        ks.setCf_defs(ImmutableList.<CfDef>of());
        client.system_update_keyspace(ks);
    } else if (currentRF == desiredRf) {
        log.info("Did not change the AtlasDB replication factor.");
    } else {
        logErrorOrThrow(
                "We only support replication factors up to 3. Attempted to go from " + currentRF
                        + " to " + desiredRf + ".",
                safetyDisabled);
    }
}
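// `logErrorOrThrow` is called throughout the method above but is not defined in this
// section. The sketch below shows one plausible implementation, assuming that
// `safetyDisabled` downgrades the failure from an exception to a log line; the
// exact message wording and the use of IllegalStateException are assumptions made
// for illustration, not the actual helper.
private static void logErrorOrThrow(String errorMessage, boolean safetyDisabled) {
    if (safetyDisabled) {
        // Safety checks are disabled: record the problem loudly but keep going.
        log.error("Running in unsafe mode: " + errorMessage);
    } else {
        throw new IllegalStateException(errorMessage);
    }
}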