@Override
public void propertyChange(PropertyChangeEvent e) {
  Boolean connected = (Boolean) e.getNewValue();
  TL1AssociationEvent event;
  if (connected.booleanValue()) {
    event = new TL1AssociationEvent(TL1AssociationEvent.ASSOCIATION_UP, neInfo);
  }
  else {
    event = new TL1AssociationEvent(TL1AssociationEvent.ASSOCIATION_DOWN, neInfo);
  }
  if (registerListener != null) {
    registerListener.receiveEvent(event);
  }
  if (heartBeatThread != null) {
    heartBeatThread.terminateHeartBeat();
  }
  /*
   * If the connection is dropped, the only way to re-establish it is to open a
   * new socket. But we cannot rely on the IP address and port number we
   * currently have, since the NE Discovery Manager may switch to a different
   * set. Anyhow, just terminate the thread and wait for the next instruction.
   */
  // terminateAssociation();
}
public synchronized void terminateAssociation() {
  registerListener = null;
  if (tl1Engine != null) {
    tl1Engine.removePropertyChangeListener(TL1LanguageEngine.CONNECTED, this);
  }
  if (heartBeatThread != null) {
    heartBeatThread.terminateHeartBeat();
  }
}
private void heartBeatStarts() throws Exception {
  // Read the heartbeat configuration from system properties and start the keep-alive thread.
  heartBeatThread = new HeartBeatThread(
      System.getProperty("org.opendrac.proxy.heartbeat.speedup", "false").equals("true"),
      Integer.parseInt(
          System.getProperty("org.opendrac.proxy.heartbeat.timeout", HEARTBEAT_TIMEOUT_DEFAULT)),
      Integer.parseInt(
          System.getProperty("org.opendrac.proxy.heartbeat.delay", HEARTBEAT_DELAY_DEFAULT)) * 1000,
      Integer.parseInt(
          System.getProperty("org.opendrac.proxy.heartbeat.skip", HEARTBEAT_SKIP_COUNT_DEFAULT)),
      "HeartBeat thread " + neInfo.getAddressInfo().toString());
  heartBeatThread.setDaemon(true);
  heartBeatThread.start();
}
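
/*
 * Illustrative sketch only (not part of the original class): the heartbeat thread above is
 * configured entirely through JVM system properties. Assuming the property names read in
 * heartBeatStarts() and some hypothetical bootstrap or test context, overrides could look like:
 *
 *   // set before the proxy creates its TL1 associations
 *   System.setProperty("org.opendrac.proxy.heartbeat.speedup", "true");
 *   System.setProperty("org.opendrac.proxy.heartbeat.timeout", "30");  // unit defined by HeartBeatThread
 *   System.setProperty("org.opendrac.proxy.heartbeat.delay", "60");    // multiplied by 1000 above, so presumably seconds
 *   System.setProperty("org.opendrac.proxy.heartbeat.skip", "3");
 *
 * The same values can also be supplied on the command line as -D flags.
 */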
public void login() throws Exception {
  tl1Engine.addPropertyChangeListener(TL1LanguageEngine.CONNECTED, this);
  AbstractTL1SendHelper helper = new TL1SynchSendHelper(neInfo, keepAliveMessage, composer, parser);
  tl1Engine.send(helper);
  TL1ResponseMessage response = helper.getTL1ResponseMessage();
  if (!response.getTid().equals(neInfo.getNetworkElementName())) {
    neInfo.setNeName(response.getTid());
    log.debug("Updated TID to <" + response.getTid() + "> for "
        + neInfo.getAddressInfo().getPrimaryIPAddress() + " "
        + neInfo.getAddressInfo().getPrimaryPort());
  }
  /*
   * WP: June 2009. Globally only permit a single ACT-USER command to be outstanding at once.
   * Why? Thanks to OMEA stupidity, if the NE is set to use OMEA for RADIUS authentication then
   * we hit a bug with OMEA. OMEA can only process a single RADIUS login request at a time; if
   * we have 10 threads all trying to log in to NEs after a restart we'll have 10 outstanding
   * RADIUS requests and most/all will fail and mess us up big time. We used to globally
   * restrict the number of worker threads to just 1, but that slows down the rest of DRAC,
   * especially if we have > 1 NE that is unreachable; we'd spend too much time trying to
   * reconnect to the dead NEs. Instead we lock just the ACT-USER phase.
   */
  synchronized (GLOBAL_ACT_USER_LOCK) {
    helper = new TL1SynchSendHelper(neInfo, buildLoginMsg(), composer, parser);
    tl1Engine.send(helper);
    response = helper.getTL1ResponseMessage();
  }
  TL1AssociationEvent event;
  // Check that the TID in the login response still matches the expected NE name.
  if (!response.getTid().equals(neInfo.getNetworkElementName())) {
    event = new TL1AssociationEvent(TL1AssociationEvent.WRONG_TID, neInfo);
    if (heartBeatThread != null) {
      heartBeatThread.disableKeepAlive();
    }
    if (registerListener != null) {
      registerListener.receiveEvent(event);
    }
    return;
  }
  if (!response.getCompletionCode().equals(TL1Constants.COMPLETED)) {
    throw new Exception("AssociationException: failed to login");
  }
  if (registerListener != null) {
    event = new TL1AssociationEvent(TL1AssociationEvent.ASSOCIATION_UP, neInfo);
    registerListener.receiveEvent(event);
  }
  else {
    log.debug("Association: No listener " + neInfo.getNetworkElementName()
        + " for event ASSOCIATION_UP");
  }
  heartBeatStarts();
}