/**
 * The message contains a MethodCall. Execute it against *this* object via MethodCall.invoke()
 * and return the result.
 */
public Object handle(Message req) throws Exception {
    if (server_obj == null) {
        log.error(Util.getMessage("NoMethodHandlerIsRegisteredDiscardingRequest"));
        return null;
    }
    if (req == null || req.getLength() == 0) {
        log.error(Util.getMessage("MessageOrMessageBufferIsNull"));
        return null;
    }
    Object body = req_marshaller != null
        ? req_marshaller.objectFromBuffer(req.getRawBuffer(), req.getOffset(), req.getLength())
        : req.getObject();
    if (!(body instanceof MethodCall))
        throw new IllegalArgumentException("message does not contain a MethodCall object");

    MethodCall method_call = (MethodCall) body;
    if (log.isTraceEnabled())
        log.trace("[sender=%s], method_call: %s", req.getSrc(), method_call);

    if (method_call.getMode() == MethodCall.ID) {
        if (method_lookup == null)
            throw new Exception(String.format(
                "MethodCall uses ID=%d, but method_lookup has not been set", method_call.getId()));
        Method m = method_lookup.findMethod(method_call.getId());
        if (m == null)
            throw new Exception("no method found for " + method_call.getId());
        method_call.setMethod(m);
    }
    return method_call.invoke(server_obj);
}
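// Usage sketch (not part of the original source): how an ID-based MethodCall resolved by handle()
// above might be wired up. Assumes the org.jgroups.blocks.RpcDispatcher, MethodCall and
// RequestOptions API of recent JGroups versions; the method id (1), the print(int) target method
// and the variable names are illustrative only.
protected static void exampleInvokeById(JChannel ch, Object server_obj) throws Exception {
    RpcDispatcher disp = new RpcDispatcher(ch, server_obj);
    disp.setMethodLookup(id -> {
        try {
            return server_obj.getClass().getMethod("print", int.class); // map id -> Method
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
    });
    MethodCall call = new MethodCall((short) 1, 42);           // mode == MethodCall.ID
    disp.callRemoteMethods(null, call, RequestOptions.SYNC()); // handle() resolves the id via method_lookup
}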
/** Callback invoked by the protocol stack to deliver a message batch */
public void up(MessageBatch batch) {
    if (stats) {
        received_msgs += batch.size();
        received_bytes += batch.length();
    }
    // discard local messages (sent by myself to me)
    if (discard_own_messages && local_addr != null && batch.sender() != null
        && local_addr.equals(batch.sender()))
        return;
    for (Message msg : batch) {
        if (up_handler != null) {
            try {
                up_handler.up(new Event(Event.MSG, msg));
            } catch (Throwable t) {
                log.error(Util.getMessage("UpHandlerFailure"), t);
            }
        } else if (receiver != null) {
            try {
                receiver.receive(msg);
            } catch (Throwable t) {
                log.error(Util.getMessage("ReceiverFailure"), t);
            }
        }
    }
}
protected List<PingData> read(InputStream in) {
    List<PingData> retval = null;
    try {
        while (true) {
            try {
                String name_str  = Util.readToken(in);
                String uuid_str  = Util.readToken(in);
                String addr_str  = Util.readToken(in);
                String coord_str = Util.readToken(in);
                if (name_str == null || uuid_str == null || addr_str == null || coord_str == null)
                    break;
                UUID uuid = null;
                try {
                    long tmp = Long.valueOf(uuid_str);
                    uuid = new UUID(0, tmp);
                } catch (Throwable t) {
                    uuid = UUID.fromString(uuid_str);
                }
                PhysicalAddress phys_addr = new IpAddress(addr_str);
                boolean is_coordinator = coord_str.trim().equals("T") || coord_str.trim().equals("t");
                if (retval == null)
                    retval = new ArrayList<>();
                retval.add(new PingData(uuid, true, name_str, phys_addr).coord(is_coordinator));
            } catch (Throwable t) {
                log.error(Util.getMessage("FailedReadingLineOfInputStream"), t);
            }
        }
        return retval;
    } finally {
        Util.close(in);
    }
}
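// Illustrative use of read() above (not part of the original source; values are made up and
// java.io.ByteArrayInputStream is assumed). Each record consists of four whitespace-separated
// tokens: logical name, UUID (a plain long or a canonical UUID string), a physical address
// parseable by IpAddress, and a "T"/"F" coordinator flag.
protected List<PingData> exampleRead() {
    String records =
          "member-1 7234916569387584331 192.168.1.5:7800 T\n"
        + "member-2 a6f2c4de-0a5b-47e3-9d6e-2f9c1e1b2a33 192.168.1.6:7800 F\n";
    return read(new ByteArrayInputStream(records.getBytes()));
}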
public void up(MessageBatch batch) {
    // Sort fork messages by fork-stack-id
    Map<String, List<Message>> map = new HashMap<>();
    for (Message msg : batch) {
        ForkHeader hdr = (ForkHeader) msg.getHeader(id);
        if (hdr != null) {
            batch.remove(msg);
            List<Message> list = map.get(hdr.fork_stack_id);
            if (list == null) {
                list = new ArrayList<>();
                map.put(hdr.fork_stack_id, list);
            }
            list.add(msg);
        }
    }
    // Now pass fork messages up, batched by fork-stack-id
    for (Map.Entry<String, List<Message>> entry : map.entrySet()) {
        String fork_stack_id = entry.getKey();
        List<Message> list = entry.getValue();
        Protocol bottom_prot = get(fork_stack_id);
        if (bottom_prot == null)
            continue;
        MessageBatch mb = new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(),
                                           batch.multicast(), list);
        try {
            bottom_prot.up(mb);
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedPassingUpBatch"), t);
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
/**
 * 1. Get all the fragment buffers<br>
 * 2. When all are received -> assemble them into one big buffer<br>
 * 3. Read headers and byte buffer from the big buffer<br>
 * 4. Set headers and buffer in msg<br>
 * 5. Pass msg up the stack
 */
private Message unfragment(Message msg, FragHeader hdr) {
    Address sender = msg.getSrc();
    FragmentationTable frag_table = fragment_list.get(sender);
    if (frag_table == null) {
        frag_table = new FragmentationTable(sender);
        try {
            fragment_list.add(sender, frag_table);
        } catch (IllegalArgumentException x) {
            // the entry has already been added, probably in parallel from another thread
            frag_table = fragment_list.get(sender);
        }
    }
    num_received_frags++;
    byte[] buf = frag_table.add(hdr.id, hdr.frag_id, hdr.num_frags, msg.getBuffer());
    if (buf == null)
        return null;
    try {
        DataInput in = new ByteArrayDataInputStream(buf);
        Message assembled_msg = new Message(false);
        assembled_msg.readFrom(in);
        assembled_msg.setSrc(sender); // needed ? YES, because fragments have a null src !!
        if (log.isTraceEnabled())
            log.trace("assembled_msg is " + assembled_msg);
        num_received_msgs++;
        return assembled_msg;
    } catch (Exception e) {
        log.error(Util.getMessage("FailedUnfragmentingAMessage"), e);
        return null;
    }
}
public Map<String, String> handleProbe(String... keys) {
    Map<String, String> map = new HashMap<>(3);
    for (String key : keys) {
        if (key.startsWith("jmx")) {
            handleJmx(map, key);
            continue;
        }
        if (key.startsWith("reset-stats")) {
            resetAllStats();
            continue;
        }
        if (key.startsWith("invoke") || key.startsWith("op")) {
            int index = key.indexOf("=");
            if (index != -1) {
                try {
                    handleOperation(map, key.substring(index + 1));
                } catch (Throwable throwable) {
                    log.error(Util.getMessage("OperationInvocationFailure"), key.substring(index + 1), throwable);
                }
            }
        }
    }
    return map;
}
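// Illustrative probe keys dispatched by handleProbe() above (not part of the original source;
// the protocol, attribute and operation names after "=" are examples only):
//   "reset-stats"        -> resetAllStats()
//   "jmx=UDP.bind_port"  -> handleJmx(map, "jmx=UDP.bind_port")
//   "op=STABLE.gc"       -> handleOperation(map, "STABLE.gc")
protected Map<String, String> exampleProbe() {
    return handleProbe("reset-stats", "jmx=UDP.bind_port", "op=STABLE.gc");
}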
@ManagedOperation(description = "Disconnects the channel if connected")
public synchronized void disconnect() {
    switch (state) {
        case OPEN:
        case CLOSED:
            return;
        case CONNECTING:
        case CONNECTED:
            if (cluster_name != null) {
                // Send down a DISCONNECT event, which travels down to the GMS, where a response is returned
                try {
                    down(new Event(Event.DISCONNECT, local_addr)); // DISCONNECT is handled by each layer
                } catch (Throwable t) {
                    log.error(Util.getMessage("DisconnectFailure"), local_addr, t);
                }
            }
            state = State.OPEN;
            stopStack(true, false);
            notifyChannelDisconnected(this);
            init(); // sets local_addr=null; changed March 18 2003 (bela) -- prevented successful rejoining
            break;
        default:
            throw new IllegalStateException("state " + state + " unknown");
    }
}
/**
 * Creates a byte[] representation of the PingData, but DISCARDING the view it contains.
 *
 * @param data the PingData instance to serialize
 * @return the serialized buffer, or null if serialization failed
 */
protected byte[] serializeWithoutView(PingData data) {
    final PingData clone = new PingData(
        data.getAddress(), data.isServer(), data.getLogicalName(), data.getPhysicalAddr())
        .coord(data.isCoord());
    try {
        return Util.streamableToByteBuffer(clone);
    } catch (Exception e) {
        log.error(Util.getMessage("ErrorSerializingPingData"), e);
        return null;
    }
}
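// Round-trip sketch for the helper above (not part of the original source; assumes
// Util.streamableToByteBuffer()/streamableFromByteBuffer() from org.jgroups.util).
protected PingData exampleRoundTrip(PingData data) throws Exception {
    byte[] buf = serializeWithoutView(data);                              // view is discarded
    return (PingData) Util.streamableFromByteBuffer(PingData.class, buf); // deserialized copy, no view
}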
/**
 * Unmarshal the original message (in the payload) and then pass it up (unless already delivered)
 *
 * @param msg the message whose payload contains the wrapped (original) message
 */
protected void unwrapAndDeliver(final Message msg, boolean flush_ack) {
    try {
        Message msg_to_deliver = Util.streamableFromBuffer(
            Message.class, msg.getRawBuffer(), msg.getOffset(), msg.getLength());
        SequencerHeader hdr = (SequencerHeader) msg_to_deliver.getHeader(this.id);
        if (flush_ack)
            hdr.flush_ack = true;
        deliver(msg_to_deliver, new Event(Event.MSG, msg_to_deliver), hdr);
    } catch (Exception ex) {
        log.error(Util.getMessage("FailureUnmarshallingBuffer"), ex);
    }
}
protected void sendRequest(Address dest, Type type, long requestId, Object object) {
    Request req = new Request(type, object, requestId);
    Message msg = new Message(dest, req).putHeader(id, new ExecutorHeader());
    if (bypass_bundling)
        msg.setFlag(Message.Flag.DONT_BUNDLE);
    if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] --> [" + (dest == null ? "ALL" : dest) + "] " + req);
    try {
        down_prot.down(msg);
    } catch (Exception ex) {
        log.error(Util.getMessage("FailedSending") + type + " request: " + ex);
    }
}
protected void handleTaskRejectedResponse(Address source, long requestId) {
    Runnable runnable = _awaitingReturn.remove(new Owner(source, requestId));
    if (runnable != null) {
        _awaitingConsumer.add(runnable);
        Long taskRequestId = _requestId.get(runnable);
        if (taskRequestId != requestId) {
            log.warn("Task Request Id doesn't match in rejection");
        }
        sendToCoordinator(RUN_REQUEST, taskRequestId, local_addr);
    } else {
        log.error(Util.getMessage("ErrorResubmittingTaskForRequestId") + requestId);
    }
}
protected void forward(final Message msg, long seqno, boolean flush) {
    Address target = coord;
    if (target == null)
        return;
    byte type = flush ? SequencerHeader.FLUSH : SequencerHeader.FORWARD;
    try {
        SequencerHeader hdr = new SequencerHeader(type, seqno);
        Message forward_msg = new Message(target, Util.streamableToBuffer(msg)).putHeader(this.id, hdr);
        down_prot.down(new Event(Event.MSG, forward_msg));
        forwarded_msgs++;
    } catch (Exception ex) {
        log.error(Util.getMessage("FailedForwardingMessageTo") + msg.getDest(), ex);
    }
}
protected void stopStack(boolean stop, boolean destroy) {
    if (prot_stack != null) {
        try {
            if (stop)
                prot_stack.stopStack(cluster_name);
            if (destroy)
                prot_stack.destroy();
        } catch (Exception e) {
            log.error(Util.getMessage("StackDestroyFailure"), e);
        }
        TP transport = prot_stack.getTransport();
        if (transport != null)
            transport.unregisterProbeHandler(probe_handler);
    }
}
@Override
protected void sendMcastDiscoveryRequest(Message msg) {
    try {
        if (msg.getSrc() == null)
            msg.setSrc(local_addr);
        ByteArrayDataOutputStream out = new ByteArrayDataOutputStream(128);
        msg.writeTo(out);
        for (int i = bind_port; i <= bind_port + port_range; i++) {
            DatagramPacket packet = new DatagramPacket(out.buffer(), 0, out.position(), dest_addr, i);
            sock.send(packet);
        }
    } catch (Exception ex) {
        log.error(Util.getMessage("FailedSendingDiscoveryRequest"), ex);
    }
}
private void drainDownQueue() {
    if (log.isTraceEnabled()) {
        int size = downMessageQueue.size();
        if (size > 0)
            log.trace("draining %d messages from the down queue", size);
    }
    while (true) {
        try {
            Message tmp = downMessageQueue.poll(0L, TimeUnit.MILLISECONDS);
            if (tmp == null)
                break;
            encryptAndSend(tmp);
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedSendingMessageDownWhenDrainingQueue"), t);
        }
    }
}
/** The coordinator itself wants to leave the group */
public void leave(Address mbr) {
    if (mbr == null) {
        if (log.isErrorEnabled())
            log.error(Util.getMessage("MemberSAddressIsNull"));
        return;
    }
    if (mbr.equals(gms.local_addr))
        leaving = true;
    gms.getViewHandler().add(new Request(Request.LEAVE, mbr, false));
    // wait until all requests have been processed, then close the queue and leave
    gms.getViewHandler().stop(true);
    // If we're the coord leaving, ignore gms.leave_timeout: https://issues.jboss.org/browse/JGRP-1509
    long timeout = (long) (Math.max(gms.leave_timeout, gms.view_ack_collection_timeout) * 1.10);
    gms.getViewHandler().waitUntilCompleted(timeout);
}
/**
 * Used to drain the up queue; may be called from two threads at once.
 */
private void drainUpQueue() {
    if (log.isTraceEnabled()) {
        int size = upMessageQueue.size();
        if (size > 0)
            log.trace("draining %d messages from the up queue", size);
    }
    while (true) {
        try {
            Message tmp = upMessageQueue.poll(0L, TimeUnit.MILLISECONDS);
            if (tmp == null)
                break;
            Message msg = decryptMessage(null, tmp.copy()); // need to copy for possible xmits
            if (msg != null)
                up_prot.up(new Event(Event.MSG, msg));
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedDecryptingAndSendingMessageUpWhenDrainingQueue"), t);
        }
    }
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        if (msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB)
            || msg.getHeader(id) == null)
            continue;
        batch.remove(msg); // simplistic implementation
        try {
            up(new Event(Event.MSG, msg));
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedPassingUpMessage"), t);
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
private void initializeNewSymmetricKey(boolean merge_view) {
    try {
        if (changeKeysOnViewChange || !keyServer || merge_view) {
            log.debug("initializing new ciphers");
            initSymKey();
            initSymCiphers(getSymAlgorithm(), getSecretKey());
        }
    } catch (Exception e) {
        log.error(Util.getMessage("CouldNotInitializeNewCiphers"), e);
        if (e instanceof RuntimeException)
            throw (RuntimeException) e;
        else
            throw new IllegalStateException(e);
    }
}
/**
 * Invokes an operation and puts the return value into map
 *
 * @param map the result map the return value is added to
 * @param operation Protocol.OperationName[args], e.g. STABLE.foo[arg1,arg2,arg3]
 */
protected void handleOperation(Map<String, String> map, String operation) throws Exception {
    int index = operation.indexOf(".");
    if (index == -1)
        throw new IllegalArgumentException("operation " + operation + " is missing the protocol name");
    String prot_name = operation.substring(0, index);
    Protocol prot = prot_stack.findProtocol(prot_name);
    if (prot == null)
        return; // less drastic than throwing an exception...

    int args_index = operation.indexOf("[");
    String method_name;
    if (args_index != -1)
        method_name = operation.substring(index + 1, args_index).trim();
    else
        method_name = operation.substring(index + 1).trim();

    String[] args = null;
    if (args_index != -1) {
        int end_index = operation.indexOf("]");
        if (end_index == -1)
            throw new IllegalArgumentException("] not found");
        List<String> str_args = Util.parseCommaDelimitedStrings(operation.substring(args_index + 1, end_index));
        Object[] strings = str_args.toArray();
        args = new String[strings.length];
        for (int i = 0; i < strings.length; i++)
            args[i] = (String) strings[i];
    }

    Method method = MethodCall.findMethod(prot.getClass(), method_name, args);
    if (method == null) {
        log.warn(Util.getMessage("MethodNotFound"), local_addr, prot.getClass().getSimpleName(), method_name);
        return;
    }
    MethodCall call = new MethodCall(method);
    Object[] converted_args = null;
    if (args != null) {
        converted_args = new Object[args.length];
        Class<?>[] types = method.getParameterTypes();
        for (int i = 0; i < args.length; i++)
            converted_args[i] = MethodCall.convert(args[i], types[i]);
    }
    Object retval = call.invoke(prot, converted_args);
    if (retval != null)
        map.put(prot_name + "." + method_name, retval.toString());
}
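// Illustrative operation strings accepted by handleOperation() above (not part of the original
// source; protocol and method names are hypothetical). Arguments are comma-delimited and each is
// converted to the declared parameter type via MethodCall.convert():
//   "FOO.dumpState"        -> invokes dumpState() on protocol FOO
//   "FOO.setLevel[trace]"  -> invokes setLevel("trace")
protected void exampleOperations(Map<String, String> map) throws Exception {
    handleOperation(map, "FOO.dumpState");
    handleOperation(map, "FOO.setLevel[trace]");
}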
protected void handleExceptionResponse(Address source, long requestId, Throwable throwable) {
    Runnable runnable = _awaitingReturn.remove(new Owner(source, requestId));
    if (runnable != null) {
        _requestId.remove(runnable);
    }
    // We can only notify of the exception if it was a future
    if (runnable instanceof RunnableFuture<?>) {
        RunnableFuture<?> future = (RunnableFuture<?>) runnable;
        ExecutorNotification notifier = notifiers.remove(future);
        if (notifier != null) {
            notifier.throwableEncountered(throwable);
        }
    } else {
        // All we can do is log the error, since there is no way to return this to the user:
        // they don't have a future object.
        log.error(Util.getMessage("RuntimeErrorEncounteredFromClusterExecuteRunnableMethod"), throwable);
    }
}
public void run() {
    final byte[] receive_buf = new byte[65535];
    DatagramPacket packet = new DatagramPacket(receive_buf, receive_buf.length);
    DataInput inp;
    while (sock != null && receiver != null && Thread.currentThread().equals(receiver)) {
        packet.setData(receive_buf, 0, receive_buf.length);
        try {
            sock.receive(packet);
            inp = new ByteArrayDataInputStream(packet.getData(), packet.getOffset(), packet.getLength());
            Message msg = new Message();
            msg.readFrom(inp);
            up(msg);
        } catch (SocketException socketEx) {
            break;
        } catch (Throwable ex) {
            log.error(Util.getMessage("FailedReceivingPacketFrom"), packet.getSocketAddress(), ex);
        }
    }
    if (log.isTraceEnabled())
        log.trace("receiver thread terminated");
}
/**
 * Send all fragments as separate messages (with same ID !). Example:
 *
 * <pre>
 * Given the generated ID is 2344, number of fragments=3, message {dst,src,buf}
 * would be fragmented into:
 *
 * [2344,3,0]{dst,src,buf1},
 * [2344,3,1]{dst,src,buf2} and
 * [2344,3,2]{dst,src,buf3}
 * </pre>
 */
private void fragment(Message msg, long size) {
    Address dest = msg.getDest(), src = msg.getSrc();
    long frag_id = curr_id.getAndIncrement(); // used as seqnos
    int num_frags;
    try {
        // write message into a byte buffer and fragment it
        ByteArrayDataOutputStream dos = new ByteArrayDataOutputStream((int) (size + 50));
        msg.writeTo(dos);
        byte[] buffer = dos.buffer();
        byte[][] fragments = Util.fragmentBuffer(buffer, frag_size, dos.position());
        num_frags = fragments.length;
        num_sent_frags += num_frags;
        if (log.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            sb.append("fragmenting packet to ").append(dest != null ? dest.toString() : "<all members>")
              .append(" (size=").append(buffer.length).append(") into ").append(num_frags)
              .append(" fragment(s) [frag_size=").append(frag_size).append(']');
            log.trace(sb.toString());
        }
        for (int i = 0; i < num_frags; i++) {
            Message frag_msg = new Message(dest, src, fragments[i]);
            FragHeader hdr = new FragHeader(frag_id, i, num_frags);
            frag_msg.putHeader(this.id, hdr);
            Event evt = new Event(Event.MSG, frag_msg);
            down_prot.down(evt);
        }
    } catch (Exception e) {
        log.error(Util.getMessage("ExceptionOccurredTryingToFragmentMessage"), e);
    }
}
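// A minimal sketch of the splitting step used above (not part of the original source; assumes
// Util.fragmentBuffer(byte[], int, int) from org.jgroups.util, sizes are illustrative).
protected static void exampleFragmentBuffer() {
    byte[] payload = new byte[10_000];
    byte[][] frags = Util.fragmentBuffer(payload, 4_000, payload.length);
    // frags.length == 3: fragments of 4000, 4000 and 2000 bytes; fragment() wraps each in its own
    // Message carrying a FragHeader(frag_id, i, num_frags)
    assert frags.length == 3;
}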
// try and decode the secret key sent from the keyserver
private SecretKeySpec decodeKey(byte[] encodedKey) throws Exception {
    byte[] keyBytes;
    synchronized (this) {
        keyBytes = asymCipher.doFinal(encodedKey);
    }
    try {
        SecretKeySpec keySpec = new SecretKeySpec(keyBytes, getAlgorithm(symAlgorithm));
        // test the reconstituted key to see if it is valid
        Cipher temp;
        if (symProvider != null && !symProvider.trim().isEmpty())
            temp = Cipher.getInstance(symAlgorithm, symProvider);
        else
            temp = Cipher.getInstance(symAlgorithm);
        temp.init(Cipher.SECRET_KEY, keySpec);
        return keySpec;
    } catch (Exception e) {
        log.error(Util.getMessage("FailedDecodingKey"), e);
        return null;
    }
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            StateHeader hdr = (StateHeader) msg.getHeader(this.id);
            if (hdr == null)
                break;
            switch (hdr.type) {
                case StateHeader.STATE_REQ:
                    state_requesters.add(msg.getSrc());
                    break;
                case StateHeader.STATE_RSP:
                    handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer());
                    break;
                case StateHeader.STATE_EX:
                    closeHoleFor(msg.getSrc());
                    handleException((Throwable) msg.getObject());
                    break;
                default:
                    log.error("%s: type %s not known in StateHeader", local_addr, hdr.type);
                    break;
            }
            return null;

        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            handleViewChange((View) evt.getArg());
            break;

        case Event.CONFIG:
            Map<String, Object> config = (Map<String, Object>) evt.getArg();
            if (config != null && config.containsKey("state_transfer"))
                log.error(Util.getMessage(
                    "ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem"));
            break;
    }
    return up_prot.up(evt);
}
public Object up(Message msg) {
    ExecutorHeader hdr = msg.getHeader(id);
    if (hdr == null)
        return up_prot.up(msg);

    Request req = msg.getObject();
    if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
    switch (req.type) {
        case RUN_REQUEST:
            handleTaskRequest(req.request, (Address) req.object);
            break;
        case CONSUMER_READY:
            handleConsumerReadyRequest(req.request, (Address) req.object);
            break;
        case CONSUMER_UNREADY:
            handleConsumerUnreadyRequest(req.request, (Address) req.object);
            break;
        case CONSUMER_FOUND:
            handleConsumerFoundResponse(req.request, (Address) req.object);
            break;
        case RUN_SUBMITTED:
            RequestWithThread reqWT = (RequestWithThread) req;
            Object objectToRun = reqWT.object;
            Runnable runnable;
            if (objectToRun instanceof Runnable) {
                runnable = (Runnable) objectToRun;
            } else if (objectToRun instanceof Callable) {
                @SuppressWarnings("unchecked")
                Callable<Object> callable = (Callable<Object>) objectToRun;
                runnable = new FutureTask<>(callable);
            } else {
                log.error(Util.getMessage("RequestOfType") + req.type
                          + " sent an object of " + objectToRun + " which is invalid");
                break;
            }
            handleTaskSubmittedRequest(runnable, msg.getSrc(), req.request, reqWT.threadId);
            break;
        case RUN_REJECTED:
            // We could make requests local for this, but is it really worth it
            handleTaskRejectedResponse(msg.getSrc(), req.request);
            break;
        case RESULT_SUCCESS:
            handleValueResponse(msg.getSrc(), req.request, req.object);
            break;
        case RESULT_EXCEPTION:
            handleExceptionResponse(msg.getSrc(), req.request, (Throwable) req.object);
            break;
        case INTERRUPT_RUN:
            // We could make requests local for this, but is it really worth it
            handleInterruptRequest(msg.getSrc(), req.request);
            break;
        case CREATE_CONSUMER_READY:
            Owner owner = new Owner((Address) req.object, req.request);
            handleNewConsumer(owner);
            break;
        case CREATE_RUN_REQUEST:
            owner = new Owner((Address) req.object, req.request);
            handleNewRunRequest(owner);
            break;
        case DELETE_CONSUMER_READY:
            owner = new Owner((Address) req.object, req.request);
            handleRemoveConsumer(owner);
            break;
        case DELETE_RUN_REQUEST:
            owner = new Owner((Address) req.object, req.request);
            handleRemoveRunRequest(owner);
            break;
        default:
            log.error(Util.getMessage("RequestOfType") + req.type + " not known");
            break;
    }
    return null;
}
protected void handleJmx(Map<String, String> map, String input) {
    Map<String, Object> tmp_stats;
    int index = input.indexOf("=");
    if (index > -1) {
        List<String> list = null;
        String protocol_name = input.substring(index + 1);
        index = protocol_name.indexOf(".");
        if (index > -1) {
            String rest = protocol_name;
            protocol_name = protocol_name.substring(0, index);
            String attrs = rest.substring(index + 1); // e.g. "num_sent_msgs,num_received_msgs"
            list = Util.parseStringList(attrs, ",");

            // check if there are any attribute-sets in the list
            for (Iterator<String> it = list.iterator(); it.hasNext(); ) {
                String tmp = it.next();
                index = tmp.indexOf("=");
                if (index != -1) {
                    String attrname = tmp.substring(0, index);
                    String attrvalue = tmp.substring(index + 1);
                    Protocol prot = prot_stack.findProtocol(protocol_name);
                    Field field = prot != null ? Util.getField(prot.getClass(), attrname) : null;
                    if (field != null) {
                        Object value = MethodCall.convert(attrvalue, field.getType());
                        if (value != null)
                            prot.setValue(attrname, value);
                    } else {
                        // try to find a setter for X, e.g. x(type-of-x) or setX(type-of-x)
                        ResourceDMBean.Accessor setter = ResourceDMBean.findSetter(prot, attrname);
                        if (setter != null) {
                            try {
                                Class<?> type = setter instanceof ResourceDMBean.FieldAccessor
                                    ? ((ResourceDMBean.FieldAccessor) setter).getField().getType()
                                    : setter instanceof ResourceDMBean.MethodAccessor
                                        ? ((ResourceDMBean.MethodAccessor) setter).getMethod().getParameterTypes()[0]
                                        : null;
                                Object converted_value = MethodCall.convert(attrvalue, type);
                                setter.invoke(converted_value);
                            } catch (Exception e) {
                                log.error("unable to invoke %s() on %s: %s", setter, protocol_name, e);
                            }
                        } else
                            log.warn(Util.getMessage("FieldNotFound"), attrname, protocol_name);
                    }
                    it.remove();
                }
            }
        }
        tmp_stats = dumpStats(protocol_name, list);
        if (tmp_stats != null) {
            for (Map.Entry<String, Object> entry : tmp_stats.entrySet()) {
                Map<String, Object> tmp_map = (Map<String, Object>) entry.getValue();
                String key = entry.getKey();
                map.put(key, tmp_map != null ? tmp_map.toString() : null);
            }
        }
    } else {
        tmp_stats = dumpStats();
        if (tmp_stats != null) {
            for (Map.Entry<String, Object> entry : tmp_stats.entrySet()) {
                Map<String, Object> tmp_map = (Map<String, Object>) entry.getValue();
                String key = entry.getKey();
                map.put(key, tmp_map != null ? tmp_map.toString() : null);
            }
        }
    }
}
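// Illustrative "jmx" probe inputs parsed by handleJmx() above (not part of the original source;
// protocol and attribute names are examples only):
//   "jmx"                      -> dumpStats() for all protocols
//   "jmx=UDP"                  -> dumpStats("UDP", null): all attributes of UDP
//   "jmx=UDP.bind_port,level"  -> only the named attributes of UDP
//   "jmx=UDP.level=trace"      -> sets attribute 'level' to "trace" via field or setter
protected void exampleJmxProbe(Map<String, String> map) {
    handleJmx(map, "jmx=UDP.bind_port,level");
    handleJmx(map, "jmx=UDP.level=trace");
}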
protected static XmlConfigurator parse(InputStream stream, Boolean validate) throws java.io.IOException {
    /* CAUTION: crappy code ahead! I (bela) am not an XML expert, so the code below is pretty
     * amateurish... But it seems to work, and it is executed only on startup, so no perf loss on
     * the critical path. If somebody wants to improve this, please be my guest. */
    try {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        boolean validation = false;
        String tmp = Util.getProperty(new String[] {Global.XML_VALIDATION}, null, null, false, null);
        if (tmp != null) {
            validation = Boolean.valueOf(tmp).booleanValue();
        } else if (validate != null) {
            validation = validate.booleanValue();
        }
        factory.setValidating(validation);
        factory.setNamespaceAware(validation);
        if (validation) {
            factory.setAttribute(JAXP_SCHEMA_LANGUAGE, W3C_XML_SCHEMA);
        }
        DocumentBuilder builder = factory.newDocumentBuilder();
        builder.setEntityResolver(new EntityResolver() {
            public InputSource resolveEntity(String publicId, String systemId) throws IOException {
                if (systemId != null && systemId.startsWith("http://www.jgroups.org/schema/JGroups-")) {
                    String schemaName = systemId.substring("http://www.jgroups.org/".length());
                    InputStream schemaIs = getAsInputStreamFromClassLoader(schemaName);
                    if (schemaIs == null) {
                        throw new IOException("Schema not found from classloader: " + schemaName);
                    }
                    InputSource source = new InputSource(schemaIs);
                    source.setPublicId(publicId);
                    source.setSystemId(systemId);
                    return source;
                }
                return null;
            }
        });

        // Use an AtomicReference so the variable can be final; atomicity is not needed here.
        // We store only the last exception.
        final AtomicReference<SAXParseException> exceptionRef = new AtomicReference<SAXParseException>();
        builder.setErrorHandler(new ErrorHandler() {
            public void warning(SAXParseException exception) throws SAXException {
                log.warn("Warning during parse", exception);
            }

            public void fatalError(SAXParseException exception) throws SAXException {
                exceptionRef.set(exception);
            }

            public void error(SAXParseException exception) throws SAXException {
                exceptionRef.set(exception);
            }
        });
        Document document = builder.parse(stream);
        if (exceptionRef.get() != null) {
            throw exceptionRef.get();
        }
        // The root element of the document should be the "config" element, but the parse(Element)
        // method checks this, so no check is needed here.
        Element configElement = document.getDocumentElement();
        return parse(configElement);
    } catch (Exception x) {
        throw new IOException(Util.getMessage("ParseError", x.getLocalizedMessage()));
    }
}
/**
 * Sends all messages currently in forward_table to the new coordinator (changing the dest field).
 * This needs to be done, so the underlying reliable unicast protocol (e.g. UNICAST) adds these
 * messages to its retransmission mechanism<br>
 * Note that we need to resend the messages in order of their seqnos! We also need to prevent
 * other messages from being inserted until we're done, that's why there's synchronization.<br>
 * Access to the forward_table doesn't need to be synchronized as there won't be any insertions
 * during flushing (all down-threads are blocked)
 */
protected void flushMessagesInForwardTable() {
    if (is_coord) {
        for (Map.Entry<Long, Message> entry : forward_table.entrySet()) {
            Long key = entry.getKey();
            Message msg = entry.getValue();
            Buffer buf;
            try {
                buf = Util.streamableToBuffer(msg);
            } catch (Exception e) {
                log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
                continue;
            }
            SequencerHeader hdr = new SequencerHeader(SequencerHeader.WRAPPED_BCAST, key);
            Message forward_msg = new Message(null, buf).putHeader(this.id, hdr);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": flushing (broadcasting) " + local_addr + "::" + key);
            down_prot.down(new Event(Event.MSG, forward_msg));
        }
        return;
    }

    // For forwarded messages, we need to receive the forwarded message from the coordinator, to
    // prevent this case:
    // - V1={A,B,C}
    // - A crashes
    // - C installs V2={B,C}
    // - C forwards messages 3 and 4 to B (the new coord)
    // - B drops 3 because its view is still V1
    // - B installs V2
    // - B receives message 4 and broadcasts it
    // ==> C's message 4 is delivered *before* message 3 !
    // ==> By resending 3 until it is received, then resending 4 until it is received, we make sure
    //     this won't happen (see https://issues.jboss.org/browse/JGRP-1449)
    while (flushing && running && !forward_table.isEmpty()) {
        Map.Entry<Long, Message> entry = forward_table.firstEntry();
        final Long key = entry.getKey();
        Message msg = entry.getValue();
        Buffer buf;
        try {
            buf = Util.streamableToBuffer(msg);
        } catch (Exception e) {
            log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
            continue;
        }
        while (flushing && running && !forward_table.isEmpty()) {
            SequencerHeader hdr = new SequencerHeader(SequencerHeader.FLUSH, key);
            Message forward_msg = new Message(coord, buf).putHeader(this.id, hdr)
                .setFlag(Message.Flag.DONT_BUNDLE);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": flushing (forwarding) " + local_addr + "::" + key
                          + " to coord " + coord);
            ack_promise.reset();
            down_prot.down(new Event(Event.MSG, forward_msg));
            Long ack = ack_promise.getResult(500);
            if (Objects.equals(ack, key) || !forward_table.containsKey(key))
                break;
        }
    }
}
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            if (msg.getDest() != null || msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB))
                break;
            if (msg.getSrc() == null)
                msg.setSrc(local_addr);
            if (flushing)
                block();

            // A seqno is not used to establish ordering, but only to weed out duplicates; next_seqno
            // doesn't need to increase monotonically, but only to be unique
            // (https://issues.jboss.org/browse/JGRP-1461) !
            long next_seqno = seqno.incrementAndGet();
            in_flight_sends.incrementAndGet();
            try {
                SequencerHeader hdr = new SequencerHeader(
                    is_coord ? SequencerHeader.BCAST : SequencerHeader.WRAPPED_BCAST, next_seqno);
                msg.putHeader(this.id, hdr);
                if (log.isTraceEnabled())
                    log.trace("[" + local_addr + "]: forwarding " + local_addr + "::" + next_seqno
                              + " to coord " + coord);
                // We always forward messages to the coordinator, even if we're the coordinator.
                // Having the coord send its messages directly led to starvation of messages from
                // other members. MPerf went up from 20MB/sec/node to 50MB/sec/node with this change!
                forwardToCoord(next_seqno, msg);
            } catch (Exception ex) {
                log.error(Util.getMessage("FailedSendingMessage"), ex);
            } finally {
                in_flight_sends.decrementAndGet();
            }
            return null; // don't pass down

        case Event.VIEW_CHANGE:
            handleViewChange((View) evt.getArg());
            break;

        case Event.TMP_VIEW:
            handleTmpView((View) evt.getArg());
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            break;
    }
    return down_prot.down(evt);
}