/** Runs the merge protocol as a leader */
public void run() {
    // 1. Generate merge_id
    MergeId new_merge_id = MergeId.create(gms.local_addr);
    Collection<Address> coordsCopy = null;
    try {
        boolean success = setMergeId(null, new_merge_id);
        if (!success) {
            log.warn("failed to set my own merge_id (" + merge_id + ") to " + new_merge_id);
            return;
        }
        coordsCopy = new ArrayList<Address>(coords.keySet());

        /* 2. Fetch the current Views/Digests from all subgroup coordinators */
        success = getMergeDataFromSubgroupCoordinators(coords, new_merge_id, gms.merge_timeout);
        if (!success)
            throw new Exception("merge leader did not get data from all partition coordinators "
                                  + coords.keySet());

        /* 3. Remove rejected MergeData elements from merge_rsps and coords (so we'll send the new
              view only to members who accepted the merge request) */
        removeRejectedMergeRequests(coords.keySet());
        if (merge_rsps.size() == 0)
            throw new Exception("did not get any merge responses from partition coordinators");
        if (!coords.keySet().contains(gms.local_addr))
            // another member might have invoked a merge req on us before we got here...
            throw new Exception("merge leader rejected merge request");

        /* 4. Combine all views and digests into 1 View/1 Digest */
        Vector<MergeData> merge_data = new Vector<MergeData>(merge_rsps.getResults().values());
        MergeData combined_merge_data = consolidateMergeData(merge_data);
        if (combined_merge_data == null)
            throw new Exception("could not consolidate merge");

        /* 5. Send the new View/Digest to all coordinators (including myself). On reception, they
              will install the digest and view in all of their subgroup members */
        sendMergeView(coords.keySet(), combined_merge_data, new_merge_id);
    }
    catch (Throwable ex) {
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": " + ex.getLocalizedMessage() + ", merge is cancelled");
        sendMergeCancelledMessage(coordsCopy, new_merge_id);
    }
    finally {
        gms.getViewHandler().resume(new_merge_id);
        stopMergeCanceller(); // this is probably not necessary

        /* 6. If FLUSH is in the stack, stop the flush for the entire cluster
              [JGRP-700] - FLUSH: flushing should span merge */
        gms.stopFlush();

        if (log.isDebugEnabled())
            log.debug(gms.local_addr + ": merge leader completed merge task");
        thread = null;
    }
}
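/* Step 2 above blocks until every subgroup coordinator has replied with its MergeData, or until
 * gms.merge_timeout expires. A minimal standalone sketch of that collect-with-timeout pattern
 * follows; it is NOT the JGroups ResponseCollector, and all names (RspCollector, waitForAll,
 * String keys standing in for Address) are hypothetical. Assumes java.util.* is imported.
 */
class RspCollector<T> {
    private final Map<String, T> rsps = new HashMap<String, T>();  // member -> response
    private final Set<String> missing = new HashSet<String>();     // members we still wait for

    RspCollector(Collection<String> targets) {
        missing.addAll(targets);
    }

    /** Called when a response arrives; wakes up the waiting merge leader */
    synchronized void add(String member, T rsp) {
        if (missing.remove(member)) {
            rsps.put(member, rsp);
            notifyAll();
        }
    }

    /** Returns true if all targets responded within timeout_ms, false otherwise
        (mirrors the success flag returned by the fetch step above) */
    synchronized boolean waitForAll(long timeout_ms) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeout_ms;
        while (!missing.isEmpty()) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0)
                return false;
            wait(remaining);
        }
        return true;
    }

    /** Snapshot of the collected responses (mirrors merge_rsps.getResults() above) */
    synchronized Map<String, T> getResults() {
        return new HashMap<String, T>(rsps);
    }
}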
public int size() {
    int retval = Global.BYTE_SIZE * 2; // type + merge_rejected

    retval += Global.BYTE_SIZE;        // presence of view
    retval += Global.BYTE_SIZE;        // MergeView or View
    if (view != null)
        retval += view.serializedSize();

    retval += Util.size(mbr);
    retval += Util.size(mbrs);

    retval += Global.BYTE_SIZE;        // presence of join_rsp
    if (join_rsp != null)
        retval += join_rsp.serializedSize();

    retval += Global.BYTE_SIZE;        // presence of my_digest
    if (my_digest != null)
        retval += my_digest.serializedSize();

    retval += Global.BYTE_SIZE;        // presence of merge_id
    if (merge_id != null)
        retval += merge_id.size();

    retval += Global.BYTE_SIZE;        // boolean useFlushIfPresent
    return retval;
}
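/* The presence bytes counted above follow a common wire-format pattern: write one byte saying
 * whether an optional field follows, then the field itself. size() must return exactly the number
 * of bytes the corresponding write method produces, or the reader will desynchronize. A minimal
 * standalone illustration of that invariant, using only java.io; the optional byte[] field and
 * both method names are hypothetical, not part of the JGroups wire format.
 */
static void writeOptionalBytes(byte[] buf, java.io.DataOutput out) throws java.io.IOException {
    out.writeBoolean(buf != null);   // 1 presence byte (Global.BYTE_SIZE in the method above)
    if (buf != null) {
        out.writeInt(buf.length);    // 4-byte length prefix
        out.write(buf);              // payload
    }
}

/** Must agree byte-for-byte with writeOptionalBytes() */
static int sizeOfOptionalBytes(byte[] buf) {
    int retval = 1;                  // presence byte
    if (buf != null)
        retval += 4 + buf.length;    // length prefix + payload
    return retval;
}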
public void run() {
    // 1. Generate merge_id
    final MergeId new_merge_id = MergeId.create(gms.local_addr);
    final Collection<Address> coordsCopy = new ArrayList<Address>(coords.keySet());

    long start = System.currentTimeMillis();
    try {
        _run(new_merge_id, coordsCopy); // might remove members from coordsCopy
    }
    catch (Throwable ex) {
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": " + ex + ", merge is cancelled");
        sendMergeCancelledMessage(coordsCopy, new_merge_id);
        // the message above cancels the merge, too, but this is a 2nd line of defense
        cancelMerge(new_merge_id);
    }
    finally {
        /* 5. If FLUSH is in the stack, stop the flush for the entire cluster
              [JGRP-700] - FLUSH: flushing should span merge */
        if (gms.flushProtocolInStack)
            gms.stopFlush();
        thread = null;
    }
    long diff = System.currentTimeMillis() - start;
    if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": merge " + new_merge_id + " took " + diff + " ms");
}
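/* The `thread = null` in the finally block above suggests a single-flight guard: the enclosing
 * task keeps a thread field so that only one merge runs at a time, and clears it when the run
 * ends (normally or not) so the next merge attempt can start. A minimal standalone sketch of
 * that pattern; SingleFlightTask and all its members are hypothetical names, not JGroups API.
 */
class SingleFlightTask {
    private Thread thread; // non-null while a run is in progress

    /** Starts the work unless a run is already in progress; returns false if rejected */
    synchronized boolean start(final Runnable work) {
        if (thread != null)
            return false; // a task is already running; reject the new request
        thread = new Thread(new Runnable() {
            public void run() {
                try {
                    work.run();
                }
                finally {
                    clear(); // mirrors `thread = null` in the method above
                }
            }
        }, "merge-task");
        thread.start();
        return true;
    }

    private synchronized void clear() {
        thread = null;
    }
}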
public String getMergeIdAsString() {
    return merge_id != null ? merge_id.toString() : null;
}