@Test
public void getPuttedValueFromTheMap() {
    HazelcastClient hClient = getHazelcastClient();
    Map<String, String> clientMap = hClient.getMap("getPuttedValueFromTheMap");
    int size = clientMap.size();
    clientMap.put("1", "Z");
    String value = clientMap.get("1");
    assertEquals("Z", value);
    assertEquals(size + 1, clientMap.size());
}
/**
 * Set the attachments, replacing any attachments previously set on this context.
 *
 * @param attachment attachments to set
 * @return context
 */
public RpcContext setAttachments(Map<String, String> attachment) {
    this.attachments.clear();
    if (attachment != null && attachment.size() > 0) {
        this.attachments.putAll(attachment);
    }
    return this;
}
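// Hedged usage sketch for setAttachments(Map) above, not taken from the source: it assumes
// this is Dubbo's thread-local RpcContext and uses its getContext() accessor; the attachment
// keys below are purely illustrative.
import java.util.HashMap;
import java.util.Map;
import org.apache.dubbo.rpc.RpcContext; // or com.alibaba.dubbo.rpc.RpcContext on older Dubbo versions

public class RpcContextAttachmentsExample {
    public static void tagCurrentInvocation() {
        Map<String, String> attachments = new HashMap<String, String>();
        attachments.put("traceId", "abc-123"); // hypothetical key
        attachments.put("tenant", "demo");     // hypothetical key
        // setAttachments() first clears existing attachments, then copies in the new entries.
        RpcContext.getContext().setAttachments(attachments);
    }
}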
@Test
public void putToTheMap() throws InterruptedException {
    HazelcastClient hClient = getHazelcastClient();
    Map<String, String> clientMap = hClient.getMap("putToTheMap");
    assertEquals(0, clientMap.size());
    String result = clientMap.put("1", "CBDEF");
    assertNull(result);
    assertEquals("CBDEF", clientMap.get("1"));
    assertEquals("CBDEF", clientMap.get("1"));
    assertEquals("CBDEF", clientMap.get("1"));
    assertEquals(1, clientMap.size());
    result = clientMap.put("1", "B");
    assertEquals("CBDEF", result);
    assertEquals("B", clientMap.get("1"));
    assertEquals("B", clientMap.get("1"));
}
/* Sends a Gossip message to an unreachable member */
void doGossipToUnreachableMember(Message message) {
    double liveEndpoints = liveEndpoints_.size();
    double unreachableEndpoints = unreachableEndpoints_.size();
    if (unreachableEndpoints > 0) {
        /* based on some probability */
        double prob = unreachableEndpoints / (liveEndpoints + 1);
        double randDbl = random_.nextDouble();
        if (randDbl < prob)
            sendGossip(message, unreachableEndpoints_.keySet());
    }
}
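// Worked example for the probability above (illustrative numbers, not from the source):
// with 9 live endpoints and 1 unreachable endpoint, prob = 1 / (9 + 1) = 0.1, so roughly
// one gossip round in ten targets the unreachable set; if unreachable endpoints come to
// outnumber live ones, prob reaches or exceeds 1 and the message is sent every round.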
@Test
public void putAll() {
    HazelcastClient hClient = getHazelcastClient();
    IMap map = hClient.getMap("putAll");
    int counter = 100;
    Set keys = new HashSet(counter);
    for (int i = 0; i < counter; i++) {
        keys.add(i);
    }
    Map all = map.getAll(keys);
    assertEquals(0, all.size());
    Map tempMap = new HashMap();
    for (int i = 0; i < counter; i++) {
        tempMap.put(i, i);
    }
    map.putAll(tempMap);
    for (int i = 0; i < counter; i++) {
        assertEquals(i, map.get(i));
    }
    all = map.getAll(keys);
    assertEquals(counter, all.size());
}
void completed(Differencer differencer) {
    logger.debug(String.format("[repair #%s] Repair completed between %s and %s on %s",
                               getName(),
                               differencer.r1.endpoint,
                               differencer.r2.endpoint,
                               differencer.cfname));
    RepairJob job = activeJobs.get(differencer.cfname);
    if (job == null) {
        assert terminated;
        return;
    }
    if (job.completedSynchronization(differencer)) {
        activeJobs.remove(differencer.cfname);
        String remaining = activeJobs.size() == 0
            ? ""
            : String.format(" (%d remaining column family to sync for this session)", activeJobs.size());
        logger.info(String.format("[repair #%s] %s is fully synced%s", getName(), differencer.cfname, remaining));
        if (activeJobs.isEmpty())
            completed.signalAll();
    }
}
/* Gossip to a seed for facilitating partition healing */
void doGossipToSeed(Message message) {
    int size = seeds_.size();
    if (size > 0) {
        if (size == 1 && seeds_.contains(localEndpoint_)) {
            return;
        }
        if (liveEndpoints_.size() == 0) {
            sendGossip(message, seeds_);
        } else {
            /* Gossip with the seed with some probability. */
            double probability = seeds_.size() / (double) (liveEndpoints_.size() + unreachableEndpoints_.size());
            double randDbl = random_.nextDouble();
            if (randDbl <= probability)
                sendGossip(message, seeds_);
        }
    }
}
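// Worked example for the seed-gossip probability above (illustrative numbers, not from the
// source): with 3 seeds, 29 live endpoints and 0 unreachable ones, probability = 3 / (29 + 0),
// which is about 0.10, so a node gossips to a seed in roughly one round out of ten; in small
// clusters the same formula pushes the probability toward 1, so seeds are contacted almost
// every round.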
private Map<String, HeaderDefinition> buildHeaderDefinitions() throws MojoFailureException {
    // like mappings, first get default definitions
    final Map<String, HeaderDefinition> headers = new HashMap<String, HeaderDefinition>(HeaderType.defaultDefinitions());
    // and then override them with those provided in properties file
    for (String resource : headerDefinitions) {
        try {
            InputSource source = new InputSource(finder.findResource(resource).openStream());
            source.setEncoding(encoding);
            final AdditionalHeaderDefinition fileDefinitions = new AdditionalHeaderDefinition(XMLDoc.from(source, true));
            final Map<String, HeaderDefinition> map = fileDefinitions.getDefinitions();
            debug("%d header definitions loaded from '%s'", map.size(), resource);
            headers.putAll(map);
        } catch (IOException ex) {
            throw new MojoFailureException("Error reading header definition: " + resource, ex);
        }
    }
    // force inclusion of the unknown item to manage unknown files
    headers.put(HeaderType.UNKNOWN.getDefinition().getType(), HeaderType.UNKNOWN.getDefinition());
    return headers;
}
@Test
public void iterateOverMapKeys() {
    HazelcastClient hClient = getHazelcastClient();
    Map<String, String> map = hClient.getMap("iterateOverMapKeys");
    map.put("1", "A");
    map.put("2", "B");
    map.put("3", "C");
    Set<String> keySet = map.keySet();
    assertEquals(3, keySet.size());
    Set<String> s = new HashSet<String>();
    for (String string : keySet) {
        s.add(string);
        assertTrue(Arrays.asList("1", "2", "3").contains(string));
    }
    assertEquals(3, s.size());
    Iterator<String> iterator = keySet.iterator();
    while (iterator.hasNext()) {
        iterator.next();
        iterator.remove();
    }
    assertEquals(0, map.size());
}
/**
 * Remove a particular entry from the trash directory or one of its subdirectories.
 *
 * @param parentId Parent ID.
 * @param id Entry id.
 * @throws IgniteCheckedException If delete failed for some reason.
 */
private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
        IgfsFileInfo info = meta.info(id);

        if (info != null) {
            assert info.isDirectory();

            Map<String, IgfsListingEntry> listing = info.listing();

            if (listing.isEmpty())
                return; // Directory is empty.

            Map<String, IgfsListingEntry> delListing;

            if (listing.size() <= MAX_DELETE_BATCH)
                delListing = listing;
            else {
                delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

                int i = 0;

                for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
                    delListing.put(entry.getKey(), entry.getValue());

                    if (++i == MAX_DELETE_BATCH)
                        break;
                }
            }

            GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

            // Delegate to child folders.
            for (IgfsListingEntry entry : delListing.values()) {
                if (!cancelled) {
                    if (entry.isDirectory())
                        deleteDirectory(id, entry.fileId());
                    else {
                        IgfsFileInfo fileInfo = meta.info(entry.fileId());

                        if (fileInfo != null) {
                            assert fileInfo.isFile();

                            fut.add(data.delete(fileInfo));
                        }
                    }
                }
                else
                    return;
            }

            fut.markInitialized();

            // Wait for data cache to delete values before clearing meta cache.
            try {
                fut.get();
            }
            catch (IgniteFutureCancelledCheckedException ignore) {
                // This future can be cancelled only due to IGFS shutdown.
                cancelled = true;

                return;
            }

            // Actual delete of folder content.
            Collection<IgniteUuid> delIds = meta.delete(id, delListing);

            if (delListing == listing && delListing.size() == delIds.size())
                break; // All entries were deleted.
        }
        else
            break; // Entry was deleted concurrently.
    }
}