/**
 * Converts this keyspace metadata into its thrift {@link KsDef} representation,
 * including every contained column family, the strategy options, and the
 * durable-writes flag.
 *
 * @return a freshly built thrift keyspace definition mirroring this metadata
 */
public KsDef toThrift() {
    // Translate each column family's metadata into its thrift form first.
    List<CfDef> thriftCfDefs = new ArrayList<CfDef>();
    for (CFMetaData columnFamily : cfMetaData().values()) {
        thriftCfDefs.add(columnFamily.toThrift());
    }
    KsDef thriftKsDef = new KsDef(name, strategyClass.getName(), thriftCfDefs);
    thriftKsDef.setStrategy_options(strategyOptions);
    thriftKsDef.setDurable_writes(durableWrites);
    return thriftKsDef;
}
/**
 * Generic field mutator used by the thrift runtime: a non-null value is cast to
 * the field's declared type and stored; a null value clears the field instead.
 *
 * @param field which thrift field of this struct to mutate
 * @param value the new value, or null to unset the field
 */
public void setFieldValue(_Fields field, Object value) {
    switch (field) {
        case NAME:
            if (value != null) {
                setName((String) value);
            } else {
                unsetName();
            }
            break;
        case STRATEGY_CLASS:
            if (value != null) {
                setStrategy_class((String) value);
            } else {
                unsetStrategy_class();
            }
            break;
        case STRATEGY_OPTIONS:
            if (value != null) {
                setStrategy_options((Map<String, String>) value);
            } else {
                unsetStrategy_options();
            }
            break;
        case REPLICATION_FACTOR:
            if (value != null) {
                setReplication_factor((Integer) value);
            } else {
                unsetReplication_factor();
            }
            break;
        case CF_DEFS:
            if (value != null) {
                setCf_defs((List<CfDef>) value);
            } else {
                unsetCf_defs();
            }
            break;
        case DURABLE_WRITES:
            if (value != null) {
                setDurable_writes((Boolean) value);
            } else {
                unsetDurable_writes();
            }
            break;
    }
}
/* * This keyspace exists because we need a way to pull the datacenter information and they only * way to do it is if you have a valid keyspace set up. We will pull the info from here * so we can accurately create the actually NetworkTopologyStrategy keyspace. */ private static void ensureTestKeyspaceExists(Cassandra.Client client) { try { try { client.describe_keyspace(CassandraConstants.SIMPLE_RF_TEST_KEYSPACE); return; } catch (NotFoundException e) { // need to create key space } KsDef testKs = new KsDef( CassandraConstants.SIMPLE_RF_TEST_KEYSPACE, CassandraConstants.SIMPLE_STRATEGY, ImmutableList.<CfDef>of()); testKs.setStrategy_options( ImmutableMap.of(CassandraConstants.REPLICATION_FACTOR_OPTION, "1")); client.system_add_keyspace(testKs); } catch (Exception e) { log.warn(e.getMessage(), e); } }
/**
 * Builds a KsDef for a secondary test keyspace; does not persist it.
 *
 * @param configuration source of the replication-factor setting
 * @param ksName name for the keyspace (also used as the CfDef's keyspace name)
 * @param addMetaData whether to attach typed column metadata to the column family
 * @return the assembled, unpersisted keyspace definition
 * @throws Exception if construction fails
 */
protected KsDef setupOtherKeyspace(
        Configuration configuration, String ksName, boolean addMetaData) throws Exception {
    CfDef columnFamily = new CfDef(ksName, "OtherCf1");
    columnFamily.setKey_validation_class("UTF8Type");
    columnFamily.setComparator_type("UTF8Type");
    if (addMetaData) {
        // One column per validator type the tests want to exercise.
        columnFamily.addToColumn_metadata(
                new ColumnDef(ByteBufferUtil.bytes("col_name_utf8"), UTF8Type.class.getName()));
        columnFamily.addToColumn_metadata(
                new ColumnDef(ByteBufferUtil.bytes("col_name_bytes"), BytesType.class.getName()));
        columnFamily.addToColumn_metadata(
                new ColumnDef(ByteBufferUtil.bytes("col_name_int"), IntegerType.class.getName()));
        columnFamily.addToColumn_metadata(
                new ColumnDef(ByteBufferUtil.bytes("col_name_long"), LongType.class.getName()));
        columnFamily.addToColumn_metadata(
                new ColumnDef(ByteBufferUtil.bytes("col_name_timeuuid"), TimeUUIDType.class.getName()));
    }
    KsDef keyspace = new KsDef(
            ksName, "org.apache.cassandra.locator.SimpleStrategy", Arrays.asList(columnFamily));
    keyspace.setStrategy_options(
            KSMetaData.optsWithRF(
                    configuration.getInt(CassandraClientHolder.CONF_PARAM_REPLICATION_FACTOR, 1)));
    return keyspace;
}
static void checkAndSetReplicationFactor( Cassandra.Client client, KsDef ks, boolean freshInstance, int desiredRf, boolean safetyDisabled) throws InvalidRequestException, SchemaDisagreementException, TException { if (freshInstance) { Set<String> dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled); // If RF exceeds # hosts, then Cassandra will reject writes ks.setStrategy_options(Maps2.createConstantValueMap(dcs, String.valueOf(desiredRf))); return; } final Set<String> dcs; if (CassandraConstants.SIMPLE_STRATEGY.equals(ks.getStrategy_class())) { int currentRF = Integer.parseInt( ks.getStrategy_options().get(CassandraConstants.REPLICATION_FACTOR_OPTION)); String errorMessage = "This cassandra cluster is running using the simple partitioning stragegy. " + "This partitioner is not rack aware and is not intended for use on prod. " + "This will have to be fixed by manually configuring to the network partitioner " + "and running the appropriate repairs. " + "Contact the AtlasDB team to perform these steps."; if (currentRF != 1) { logErrorOrThrow(errorMessage, safetyDisabled); } // Automatically convert RF=1 to look like network partitioner. dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled); if (dcs.size() > 1) { logErrorOrThrow(errorMessage, safetyDisabled); } if (!safetyDisabled) { ks.setStrategy_class(CassandraConstants.NETWORK_STRATEGY); ks.setStrategy_options(ImmutableMap.of(dcs.iterator().next(), "1")); } } else { dcs = CassandraVerifier.sanityCheckDatacenters(client, desiredRf, safetyDisabled); } Map<String, String> strategyOptions = Maps.newHashMap(ks.getStrategy_options()); for (String dc : dcs) { if (strategyOptions.get(dc) == null) { logErrorOrThrow( "The datacenter for this cassandra cluster is invalid. 
" + " failed dc: " + dc + " strategyOptions: " + strategyOptions, safetyDisabled); } } String dc = dcs.iterator().next(); int currentRF = Integer.parseInt(strategyOptions.get(dc)); // We need to worry about user not running repair and user skipping replication levels. if (currentRF == 1 && desiredRf == 2) { log.error( "Upping AtlasDB replication factor from 1 to 2. User should run " + "`nodetool repair` on cluster if they have not already!"); strategyOptions.put(dc, String.valueOf(desiredRf)); ks.setStrategy_options(strategyOptions); } else if (currentRF == 1 && desiredRf == CassandraConstants.DEFAULT_REPLICATION_FACTOR) { log.error( "Upping AtlasDB replication factor from 1 " + "to 3 is NOT allowed directly.\n" + "Increase replication factor to 2 first, then run `nodetool repair`. If it succeeds, increase replication factor to 3, and run `nodetool repair`"); } else if (currentRF == 2 && desiredRf == CassandraConstants.DEFAULT_REPLICATION_FACTOR) { strategyOptions.put(dc, String.valueOf(desiredRf)); ks.setStrategy_options(strategyOptions); ks.setCf_defs(ImmutableList.<CfDef>of()); client.system_update_keyspace(ks); log.warn( "Updating AtlasDB replication factor from " + currentRF + " to " + desiredRf + " process are NOT completed!" + " User may want to run `nodetool repair` to make all replicas consistent."); } else if (currentRF > desiredRf) { // We are moving to a lower RF, this should be always safe from a consistency // rule standpoint log.error( "Reducing AtlasDB replication factor from " + currentRF + " to " + desiredRf + ". User may want to run `nodetool cleanup` to remove excess replication."); strategyOptions.put(dc, String.valueOf(desiredRf)); ks.setStrategy_options(strategyOptions); ks.setCf_defs(ImmutableList.<CfDef>of()); client.system_update_keyspace(ks); } else if (currentRF == desiredRf) { log.info("Did not change AtlasDB replication factor."); } else { logErrorOrThrow( "We only support replication up to 3. 
Attempted to go from " + currentRF + " to " + desiredRf + ".", safetyDisabled); } }