private static void startShibbolethSNAA(String path,
                                        String prefix,
                                        String secretKeyURL,
                                        IUserAuthorization authorization,
                                        Injector injector,
                                        ShibbolethProxy shibbolethProxy) {

    log.debug("Starting Shibboleth SNAA, path [" + path + "], prefix [" + prefix
            + "], secretKeyURL [" + secretKeyURL + "]");

    Set<String> prefixes = new HashSet<String>();
    prefixes.add(prefix);

    ShibbolethSNAAImpl shibSnaa =
            new ShibbolethSNAAImpl(prefixes, secretKeyURL, authorization, injector, shibbolethProxy);

    // Publish the SNAA as a JAX-WS endpoint on the shared HTTP server.
    HttpContext context = server.createContext(path);
    Endpoint endpoint = Endpoint.create(shibSnaa);
    endpoint.publish(context);

    log.debug("Started Shibboleth SNAA on " + server.getAddress() + path);
}
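// The context-then-publish pattern above (create an HttpContext on a shared
// HttpServer, then bind a JAX-WS Endpoint to it) lets several services share a
// single server instance and port. A self-contained sketch of the same
// pattern, using a hypothetical service class (EchoService, the port, and the
// /echo path are illustrative, not part of the original code):
import java.net.InetSocketAddress;
import javax.jws.WebMethod;
import javax.jws.WebService;
import javax.xml.ws.Endpoint;
import com.sun.net.httpserver.HttpContext;
import com.sun.net.httpserver.HttpServer;

public class EndpointPublishDemo {

    // Hypothetical service class used only for this sketch.
    @WebService
    public static class EchoService {
        @WebMethod
        public String echo(String s) {
            return s;
        }
    }

    public static void main(String[] args) throws Exception {
        // One HttpServer can host many endpoints, each under its own context path.
        HttpServer server = HttpServer.create(new InetSocketAddress(8080), 0);
        server.start();

        HttpContext context = server.createContext("/echo");
        Endpoint endpoint = Endpoint.create(new EchoService());
        endpoint.publish(context);

        System.out.println("Published on " + server.getAddress() + "/echo");
    }
}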
private static Set<String> parseCSV(String str) {
    // Split a comma-separated string and trim surrounding whitespace from each entry.
    String[] split = str.split(",");
    Set<String> trimmedSplit = new HashSet<String>();
    for (String string : split) {
        trimmedSplit.add(string.trim());
    }
    return trimmedSplit;
}
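// Quick illustration of parseCSV: whitespace around entries is trimmed and
// duplicates collapse into a single set element (the input values here are
// illustrative only).
Set<String> parsed = parseCSV("alpha, beta ,alpha");
// parsed now contains exactly {"alpha", "beta"}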
public void handleStreamEvent(StreamEvent event) {
    if (event.eventType == StreamEvent.Type.STREAM_PREPARED) {
        SessionInfo session = ((StreamEvent.SessionPreparedEvent) event).session;
        sessionsByHost.put(session.peer, session);
    } else if (event.eventType == StreamEvent.Type.FILE_PROGRESS) {
        ProgressInfo progressInfo = ((StreamEvent.ProgressEvent) event).progress;

        // update progress
        Set<ProgressInfo> progresses = progressByHost.get(progressInfo.peer);
        if (progresses == null) {
            progresses = Sets.newSetFromMap(new ConcurrentHashMap<ProgressInfo, Boolean>());
            progressByHost.put(progressInfo.peer, progresses);
        }
        // Set.add is a no-op when an equal element is already present, so drop
        // the stale entry first to make sure the updated ProgressInfo replaces it.
        if (progresses.contains(progressInfo))
            progresses.remove(progressInfo);
        progresses.add(progressInfo);

        StringBuilder sb = new StringBuilder();
        sb.append("\rprogress: ");

        // Per-host summary: completed/total files and percentage of bytes sent.
        long totalProgress = 0;
        long totalSize = 0;
        for (Map.Entry<InetAddress, Set<ProgressInfo>> entry : progressByHost.entrySet()) {
            SessionInfo session = sessionsByHost.get(entry.getKey());

            long size = session.getTotalSizeToSend();
            long current = 0;
            int completed = 0;
            for (ProgressInfo progress : entry.getValue()) {
                if (progress.currentBytes == progress.totalBytes)
                    completed++;
                current += progress.currentBytes;
            }
            totalProgress += current;
            totalSize += size;

            sb.append("[").append(entry.getKey());
            sb.append(" ").append(completed).append("/").append(session.getTotalFilesToSend());
            sb.append(" (").append(size == 0 ? 100L : current * 100L / size).append("%)] ");
        }

        // Overall percentage plus instantaneous and average throughput.
        long time = System.nanoTime();
        long deltaTime = Math.max(1L, TimeUnit.NANOSECONDS.toMillis(time - lastTime));
        lastTime = time;
        long deltaProgress = totalProgress - lastProgress;
        lastProgress = totalProgress;

        sb.append("[total: ").append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize).append("% - ");
        sb.append(mbPerSec(deltaProgress, deltaTime)).append("MB/s");
        sb.append(" (avg: ").append(mbPerSec(totalProgress, TimeUnit.NANOSECONDS.toMillis(time - start))).append("MB/s)]");
        System.out.print(sb.toString());
    }
}
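// The two throughput figures above rely on an mbPerSec helper that is not part
// of this excerpt. A minimal sketch of what it could look like, assuming it
// converts a byte count over a millisecond interval into MB/s; only the name
// and the call shape are taken from the code above, the body is an assumption.
private static long mbPerSec(long bytes, long timeInMs) {
    double bytesPerMs = ((double) bytes) / timeInMs;
    return (long) (bytesPerMs * 1000 / (1024 * 1024));
}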
@Override
public void init(String keyspace) {
    Iterator<InetAddress> hostiter = hosts.iterator();
    while (hostiter.hasNext()) {
        try {
            // Query endpoint to ranges map and schemas from thrift
            InetAddress host = hostiter.next();
            Cassandra.Client client =
                    createThriftClient(host.getHostAddress(), rpcPort, this.user, this.passwd, this.transportFactory);

            setPartitioner(client.describe_partitioner());
            Token.TokenFactory tkFactory = getPartitioner().getTokenFactory();

            for (TokenRange tr : client.describe_ring(keyspace)) {
                Range<Token> range = new Range<>(tkFactory.fromString(tr.start_token),
                                                 tkFactory.fromString(tr.end_token),
                                                 getPartitioner());
                for (String ep : tr.endpoints) {
                    addRangeForEndpoint(range, InetAddress.getByName(ep));
                }
            }

            String query = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s'",
                                         Keyspace.SYSTEM_KS,
                                         SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF,
                                         keyspace);
            CqlResult result = client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE);
            for (CqlRow row : result.rows) {
                CFMetaData metadata = CFMetaData.fromThriftCqlRow(row);
                knownCfs.put(metadata.cfName, metadata);
            }
            break;
        } catch (Exception e) {
            // Try the next candidate host; give up only when none are left.
            if (!hostiter.hasNext())
                throw new RuntimeException("Could not retrieve endpoint ranges: ", e);
        }
    }
}
public void init(String keyspace) {
    outputHandler.output(String.format("Starting client (and waiting %d seconds for gossip) ...",
                                       StorageService.RING_DELAY / 1000));
    try {
        // Init gossip
        StorageService.instance.initClient();

        Set<InetAddress> hosts = Gossiper.instance.getLiveMembers();
        hosts.remove(FBUtilities.getLocalAddress());
        if (hosts.isEmpty())
            throw new IllegalStateException("Cannot load any sstable, no live member found in the cluster");

        // Query endpoint to ranges map and schemas from thrift
        String host = hosts.iterator().next().toString().substring(1);
        int port = DatabaseDescriptor.getRpcPort();

        Cassandra.Client client = createThriftClient(host, port);
        List<TokenRange> tokenRanges = client.describe_ring(keyspace);
        List<KsDef> ksDefs = client.describe_keyspaces();

        Token.TokenFactory tkFactory = StorageService.getPartitioner().getTokenFactory();

        try {
            for (TokenRange tr : tokenRanges) {
                Range range = new Range(tkFactory.fromString(tr.start_token), tkFactory.fromString(tr.end_token));
                for (String ep : tr.endpoints) {
                    addRangeForEndpoint(range, InetAddress.getByName(ep));
                }
            }
        } catch (UnknownHostException e) {
            throw new RuntimeException("Got an unknown host from describe_ring()", e);
        }

        for (KsDef ksDef : ksDefs) {
            Set<String> cfs = new HashSet<String>();
            for (CfDef cfDef : ksDef.cf_defs)
                cfs.add(cfDef.name);
            knownCfs.put(ksDef.name, cfs);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
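// Both init variants delegate connection setup to a createThriftClient helper
// that is not shown in this excerpt. A sketch of what the two-argument variant
// could look like, assuming a plain framed Thrift transport; the body is an
// assumption, only the name and signature appear above. (The five-argument
// overload used earlier would additionally handle credentials and a pluggable
// transport factory.)
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;

private static Cassandra.Client createThriftClient(String host, int port) throws TTransportException {
    TSocket socket = new TSocket(host, port);
    TTransport transport = new TFramedTransport(socket);
    transport.open(); // may throw TTransportException if the node is unreachable
    return new Cassandra.Client(new TBinaryProtocol(transport));
}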
public static void main(String args[]) throws IOException {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions.
        DatabaseDescriptor.loadSchemas();

        String ksName = null;
        String cfName = null;
        Map<Descriptor, Set<Component>> parsedFilenames = new HashMap<Descriptor, Set<Component>>();
        for (String filename : options.filenames) {
            File file = new File(filename);
            if (!file.exists()) {
                System.out.println("Skipping nonexistent file " + file);
                continue;
            }

            Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
            if (pair == null) {
                System.out.println("Skipping non-sstable file " + file);
                continue;
            }

            Descriptor desc = pair.left;
            // All sstables passed on the command line must belong to the same keyspace and column family.
            if (ksName == null)
                ksName = desc.ksname;
            else if (!ksName.equals(desc.ksname))
                throw new IllegalArgumentException("All sstables must be part of the same keyspace");

            if (cfName == null)
                cfName = desc.cfname;
            else if (!cfName.equals(desc.cfname))
                throw new IllegalArgumentException("All sstables must be part of the same column family");

            Set<Component> components = new HashSet<Component>(Arrays.asList(new Component[] {
                    Component.DATA,
                    Component.PRIMARY_INDEX,
                    Component.FILTER,
                    Component.COMPRESSION_INFO,
                    Component.STATS
            }));

            // Keep only the components that actually exist on disk.
            Iterator<Component> iter = components.iterator();
            while (iter.hasNext()) {
                Component component = iter.next();
                if (!(new File(desc.filenameFor(component)).exists()))
                    iter.remove();
            }
            parsedFilenames.put(desc, components);
        }

        if (ksName == null || cfName == null) {
            System.err.println("No valid sstables to split");
            System.exit(1);
        }

        // Do not load sstables since they might be broken
        Table table = Table.openWithoutSSTables(ksName);
        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);

        String snapshotName = "pre-split-" + System.currentTimeMillis();

        List<SSTableReader> sstables = new ArrayList<SSTableReader>();
        for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet()) {
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(fn.getKey(), fn.getValue(), cfs.metadata);
                sstables.add(sstable);

                if (options.snapshot) {
                    File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
                    sstable.createLinks(snapshotDirectory.getPath());
                }
            } catch (Exception e) {
                System.err.println(String.format("Error loading %s: %s", fn.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        if (options.snapshot)
            System.out.println(String.format("Pre-split sstables snapshotted into snapshot %s", snapshotName));

        cfs.getDataTracker().markCompacting(sstables);
        for (SSTableReader sstable : sstables) {
            try {
                new SSTableSplitter(cfs, sstable, options.sizeInMB).split();

                // Remove the sstable
                sstable.markCompacted();
                sstable.releaseReference();
            } catch (Exception e) {
                System.err.println(String.format("Error splitting %s: %s", sstable, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        SSTableDeletingTask.waitForDeletions();
        System.exit(0); // We need this to stop non-daemonized threads
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
public boolean validateColumnFamily(String keyspace, String cfName) {
    Set<String> cfs = knownCfs.get(keyspace);
    return cfs != null && cfs.contains(cfName);
}
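// Illustrative usage of validateColumnFamily, assuming init(...) has already
// populated knownCfs; the keyspace and table names here are hypothetical.
if (!validateColumnFamily("my_keyspace", "my_table"))
    throw new IllegalArgumentException("Unknown keyspace/column family my_keyspace.my_table");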
public static void main(String args[]) throws IOException {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions.
        DatabaseDescriptor.loadSchemas(false);

        if (Schema.instance.getCFMetaData(options.keyspace, options.cf) == null)
            throw new IllegalArgumentException(
                    String.format("Unknown keyspace/columnFamily %s.%s", options.keyspace, options.cf));

        Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspace);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cf);

        OutputHandler handler = new OutputHandler.SystemOutput(false, options.debug);
        Directories.SSTableLister lister = cfs.directories.sstableLister();
        if (options.snapshot != null)
            lister.onlyBackups(true).snapshots(options.snapshot);
        else
            lister.includeBackups(false);

        Collection<SSTableReader> readers = new ArrayList<SSTableReader>();

        // Upgrade sstables
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA) || !components.contains(Component.PRIMARY_INDEX))
                continue;

            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs.metadata);
                // Skip sstables that are already on the current format version.
                if (sstable.descriptor.version.equals(Descriptor.Version.CURRENT))
                    continue;
                readers.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error loading %s: %s", entry.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
                continue;
            }
        }

        int numSSTables = readers.size();
        handler.output("Found " + numSSTables + " sstables that need upgrading.");

        for (SSTableReader sstable : readers) {
            try {
                Upgrader upgrader = new Upgrader(cfs, sstable, handler);
                upgrader.upgrade();

                if (!options.keepSource) {
                    // Remove the sstable (it's been copied by upgrade)
                    System.out.format("Deleting table %s.%n", sstable.descriptor.baseFilename());
                    sstable.markObsolete();
                    sstable.selfRef().release();
                }
            } catch (Exception e) {
                System.err.println(String.format("Error upgrading %s: %s", sstable, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        SSTableDeletingTask.waitForDeletions();
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}