Example 1
 synchronized boolean isServerDeadAndNotProcessed(ServerName server) {
   if (server == null) return false;
   if (serverManager.isServerOnline(server)) {
     String hostAndPort = server.getHostAndPort();
     long startCode = server.getStartcode();
     Long deadCode = deadServers.get(hostAndPort);
     if (deadCode == null || startCode > deadCode.longValue()) {
       if (serverManager.isServerReachable(server)) {
         return false;
       }
       // The size of deadServers won't grow unbounded.
       deadServers.put(hostAndPort, Long.valueOf(startCode));
     }
     // Watch out! If the server is not dead, the region could
     // remain unassigned. That's why ServerManager#isServerReachable
     // should use some retry.
     //
     // We cache this info since it is very unlikely for that
     // instance to come back up later on. We don't want to expire
     // the server since we prefer to let it die naturally.
     LOG.warn("Couldn't reach online server " + server);
   }
   // Now, we know it's dead. Check if it's processed
   return !processedServers.containsKey(server);
 }
Example 2
 public static void main(String[] args) {
   try {
     ServerManager serverManager = initializeServer();
     serverManager.initialize();
     serverManager.start();
   } catch (Exception e) {
     log.severe("Failed to start the server");
     log.severe(e.getMessage());
      // Exit with a non-zero status so callers can detect the failed startup.
      System.exit(1);
   }
 }
Example 3
  private <T> void assertQueryable(
      QueryGranularity granularity,
      String dataSource,
      Interval interval,
      List<Pair<String, Interval>> expected) {
    Iterator<Pair<String, Interval>> expectedIter = expected.iterator();
    final List<Interval> intervals = Arrays.asList(interval);
    final SearchQuery query =
        Druids.newSearchQueryBuilder()
            .dataSource(dataSource)
            .intervals(intervals)
            .granularity(granularity)
            .limit(10000)
            .query("wow")
            .build();
    QueryRunner<Result<SearchResultValue>> runner =
        serverManager.getQueryRunnerForIntervals(query, intervals);
    final Sequence<Result<SearchResultValue>> seq = runner.run(query);
    Sequences.toList(seq, Lists.<Result<SearchResultValue>>newArrayList());
    Iterator<SegmentForTesting> adaptersIter = factory.getAdapters().iterator();

    while (expectedIter.hasNext() && adaptersIter.hasNext()) {
      Pair<String, Interval> expectedVals = expectedIter.next();
      SegmentForTesting value = adaptersIter.next();

      Assert.assertEquals(expectedVals.lhs, value.getVersion());
      Assert.assertEquals(expectedVals.rhs, value.getInterval());
    }

    Assert.assertFalse(expectedIter.hasNext());
    Assert.assertFalse(adaptersIter.hasNext());

    factory.clearAdapters();
  }
Example 4
  /**
   * A region is online, won't be in transition any more. We can't confirm it is really online on
   * specified region server because it hasn't been put in region server's online region list yet.
   */
  public void regionOnline(final HRegionInfo hri, final ServerName serverName, long openSeqNum) {
    String encodedName = hri.getEncodedName();
    if (!serverManager.isServerOnline(serverName)) {
      // This is possible if the region server dies before the master gets a
      // chance to handle the ZK event in time. If the dead server has already
      // been processed by SSH, we should ignore this event; if it hasn't been
      // processed yet, also ignore it and let SSH deal with it.
      LOG.warn("Ignored, " + encodedName + " was opened on a dead server: " + serverName);
      return;
    }
    updateRegionState(hri, State.OPEN, serverName, openSeqNum);

    synchronized (this) {
      regionsInTransition.remove(encodedName);
      ServerName oldServerName = regionAssignments.put(hri, serverName);
      if (!serverName.equals(oldServerName)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName);
        }
        addToServerHoldings(serverName, hri);
        addToReplicaMapping(hri);
        if (oldServerName == null) {
          oldServerName = oldAssignments.remove(encodedName);
        }
        if (oldServerName != null
            && !oldServerName.equals(serverName)
            && serverHoldings.containsKey(oldServerName)) {
          LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
          removeFromServerHoldings(oldServerName, hri);
        }
      }
    }
  }
Example 5
  @Override
  public void addSegment(DataSegment segment) {
    try {
      log.info("Loading segment %s", segment.getIdentifier());

      try {
        serverManager.loadSegment(segment);
      } catch (Exception e) {
        removeSegment(segment);
        throw new SegmentLoadingException(
            e, "Exception loading segment[%s]", segment.getIdentifier());
      }

      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.exists()) {
        try {
          jsonMapper.writeValue(segmentInfoCacheFile, segment);
        } catch (IOException e) {
          removeSegment(segment);
          throw new SegmentLoadingException(
              e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
        }
      }

      try {
        announcer.announceSegment(segment);
      } catch (IOException e) {
        throw new SegmentLoadingException(
            e, "Failed to announce segment[%s]", segment.getIdentifier());
      }

    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segment for dataSource").addData("segment", segment).emit();
    }
  }
Example 6
  @Before
  public void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    conf = TEST_UTIL.getConfiguration();
    // Use a different ZK wrapper instance for each test.
    zkw =
        new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
    ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
    LOG.debug(zkw.baseZNode + " created");
    ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
    LOG.debug(zkw.splitLogZNode + " created");

    stopped = false;
    resetCounters();

    // By default, we let the test manage the error as before, so the server
    // does not appear dead from the master's point of view, only from the
    // split-log manager's point of view.
    Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
    Mockito.when(master.getServerManager()).thenReturn(sm);

    to = 4000;
    conf.setInt("hbase.splitlog.manager.timeout", to);
    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
    to = to + 4 * 100; // add four timeout-monitor periods (4 * 100 ms) of slack
  }
Example 7
  private void loadCache() {
    File baseDir = config.getInfoDir();
    if (!baseDir.exists()) {
      return;
    }

    List<DataSegment> cachedSegments = Lists.newArrayList();
    for (File file : baseDir.listFiles()) {
      log.info("Loading segment cache file [%s]", file);
      try {
        DataSegment segment = jsonMapper.readValue(file, DataSegment.class);
        if (serverManager.isSegmentCached(segment)) {
          cachedSegments.add(segment);
        } else {
          log.warn(
              "Unable to find cache file for %s. Deleting lookup entry", segment.getIdentifier());

          File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
          if (!segmentInfoCacheFile.delete()) {
            log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
          }
        }
      } catch (Exception e) {
        log.makeAlert(e, "Failed to load segment from segmentInfo file")
            .addData("file", file)
            .emit();
      }
    }

    addSegments(cachedSegments);
  }
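Examples 5 and 7 bracket the same on-disk metadata cache: addSegment writes a per-segment JSON info file via jsonMapper, and loadCache reads those files back on startup. The sketch below isolates that round trip, assuming Jackson's ObjectMapper and a hypothetical SegmentInfoSketch POJO in a temp directory; it illustrates the pattern only and is not Druid's actual DataSegment or coordinator code.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;

/** Hypothetical stand-in for the segment metadata being cached. */
class SegmentInfoSketch {
  public String identifier;
  public String dataSource;

  public SegmentInfoSketch() {} // no-arg constructor for Jackson

  public SegmentInfoSketch(String identifier, String dataSource) {
    this.identifier = identifier;
    this.dataSource = dataSource;
  }
}

class InfoCacheRoundTrip {
  public static void main(String[] args) throws IOException {
    ObjectMapper jsonMapper = new ObjectMapper();
    File infoDir = new File(System.getProperty("java.io.tmpdir"));
    SegmentInfoSketch segment = new SegmentInfoSketch("test_2011-04-01_v1", "test");

    // Write the per-segment info file only if it is not already cached,
    // mirroring the exists() check in Example 5.
    File segmentInfoCacheFile = new File(infoDir, segment.identifier);
    if (!segmentInfoCacheFile.exists()) {
      jsonMapper.writeValue(segmentInfoCacheFile, segment);
    }

    // On restart the cached file can be read back, as loadCache() does above.
    SegmentInfoSketch restored = jsonMapper.readValue(segmentInfoCacheFile, SegmentInfoSketch.class);
    System.out.println("restored " + restored.identifier + " for " + restored.dataSource);
  }
}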
Example 8
 /** Loads the settings. */
 private void loadProp() {
   ServerManager manager = ServerManager.getServerManager();
   smtpPortText.setText(manager.getPropertyValue("smtpPort"));
   pop3PortText.setText(manager.getPropertyValue("pop3Port"));
   localDomainText.setText(manager.getPropertyValue("localDomain"));
   localHostNameText.setText(manager.getPropertyValue("localHostName"));
   connectLostTimeText.setText(manager.getPropertyValue("connectLostTime"));
   connectResponseTimeText.setText(manager.getPropertyValue("connectResponseTime"));
   emailSizeText.setText(manager.getPropertyValue("emailSize"));
 }
Example 9
 /** Saves the settings. */
 private void saveProp() {
   ServerManager manager = ServerManager.getServerManager();
   String text = smtpPortText.getText();
   manager.savePropertyByKey("smtpPort", text);
   text = pop3PortText.getText();
   manager.savePropertyByKey("pop3Port", text);
   text = localDomainText.getText();
   manager.savePropertyByKey("localDomain", text);
   text = localHostNameText.getText();
   manager.savePropertyByKey("localHostName", text);
   text = connectLostTimeText.getText();
   manager.savePropertyByKey("connectLostTime", text);
   text = connectResponseTimeText.getText();
   manager.savePropertyByKey("connectResponseTime", text);
   text = emailSizeText.getText();
   manager.savePropertyByKey("emailSize", text);
    // Write the settings to the file
   manager.saveProperty();
 }
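Examples 8 and 9 treat ServerManager as a simple key/value settings store: getPropertyValue reads a value, savePropertyByKey updates it, and saveProperty flushes everything to disk. A minimal sketch of such a store, assuming it is a thin wrapper around java.util.Properties (an assumption for illustration; the project's actual implementation may differ):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;

/** Hypothetical Properties-backed settings store mirroring the calls in Examples 8 and 9. */
final class PropertyStoreSketch {
  private final Path file;
  private final Properties props = new Properties();

  PropertyStoreSketch(Path file) throws IOException {
    this.file = file;
    if (Files.exists(file)) {
      try (InputStream in = Files.newInputStream(file)) {
        props.load(in); // read previously saved settings
      }
    }
  }

  String getPropertyValue(String key) {
    return props.getProperty(key, "");
  }

  void savePropertyByKey(String key, String value) {
    props.setProperty(key, value); // updates only the in-memory copy
  }

  void saveProperty() throws IOException {
    try (OutputStream out = Files.newOutputStream(file)) {
      props.store(out, "mail server settings"); // flush everything to disk
    }
  }
}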
Example 10
 private ServerManager boot(
     String[] args, InputStream stdin, PrintStream stdout, PrintStream stderr) {
   ServerManager sm = null;
   try {
     ServerManagerEnvironment config = determineEnvironment(args, stdin, stdout, stderr);
     if (config == null) {
       abort(null);
       return null;
     } else {
       sm = new ServerManager(config);
       sm.start();
     }
   } catch (Throwable t) {
     t.printStackTrace(stderr);
     abort(t);
     return null;
   }
   return sm;
 }
Example 11
  @Test(timeout = 60000)
  public void testServerListener() throws IOException, InterruptedException {
    ServerManager serverManager = TEST_UTIL.getHBaseCluster().getMaster().getServerManager();

    DummyServerListener listener = new DummyServerListener();
    serverManager.registerListener(listener);
    try {
      MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();

      // Start a new Region Server
      miniCluster.startRegionServer();
      listener.awaitModifications(1);
      assertEquals(1, listener.getAddedCount());
      assertEquals(0, listener.getRemovedCount());

      // Start another Region Server
      listener.reset();
      miniCluster.startRegionServer();
      listener.awaitModifications(1);
      assertEquals(1, listener.getAddedCount());
      assertEquals(0, listener.getRemovedCount());

      int nrs = miniCluster.getRegionServerThreads().size();

      // Stop a Region Server
      listener.reset();
      miniCluster.stopRegionServer(nrs - 1);
      listener.awaitModifications(1);
      assertEquals(0, listener.getAddedCount());
      assertEquals(1, listener.getRemovedCount());

      // Stop another Region Server
      listener.reset();
      miniCluster.stopRegionServer(nrs - 2);
      listener.awaitModifications(1);
      assertEquals(0, listener.getAddedCount());
      assertEquals(1, listener.getRemovedCount());
    } finally {
      serverManager.unregisterListener(listener);
    }
  }
Example 12
  public void addSegments(Iterable<DataSegment> segments) {
    try {
      final List<String> segmentFailures = Lists.newArrayList();
      final List<DataSegment> validSegments = Lists.newArrayList();

      for (DataSegment segment : segments) {
        log.info("Loading segment %s", segment.getIdentifier());

        try {
          serverManager.loadSegment(segment);
        } catch (Exception e) {
          log.error(e, "Exception loading segment[%s]", segment.getIdentifier());
          removeSegment(segment);
          segmentFailures.add(segment.getIdentifier());
          continue;
        }

        File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
        if (!segmentInfoCacheFile.exists()) {
          try {
            jsonMapper.writeValue(segmentInfoCacheFile, segment);
          } catch (IOException e) {
            log.error(
                e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
            removeSegment(segment);
            segmentFailures.add(segment.getIdentifier());
            continue;
          }
        }

        validSegments.add(segment);
      }

      try {
        announcer.announceSegments(validSegments);
      } catch (IOException e) {
        throw new SegmentLoadingException(e, "Failed to announce segments[%s]", segments);
      }

      if (!segmentFailures.isEmpty()) {
        for (String segmentFailure : segmentFailures) {
          log.error("%s failed to load", segmentFailure);
        }
        throw new SegmentLoadingException(
            "%,d errors seen while loading segments", segmentFailures.size());
      }
    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segments for dataSource")
          .addData("segments", segments)
          .emit();
    }
  }
Example 13
  @Test
  public void testLoadCache() throws Exception {
    EasyMock.replay(yp);

    List<DataSegment> segments =
        Lists.newArrayList(
            makeSegment("test", "1", new Interval("P1d/2011-04-01")),
            makeSegment("test", "1", new Interval("P1d/2011-04-02")),
            makeSegment("test", "2", new Interval("P1d/2011-04-02")),
            makeSegment("test", "1", new Interval("P1d/2011-04-03")),
            makeSegment("test", "1", new Interval("P1d/2011-04-04")),
            makeSegment("test", "1", new Interval("P1d/2011-04-05")),
            makeSegment("test", "2", new Interval("PT1h/2011-04-04T01")),
            makeSegment("test", "2", new Interval("PT1h/2011-04-04T02")),
            makeSegment("test", "2", new Interval("PT1h/2011-04-04T03")),
            makeSegment("test", "2", new Interval("PT1h/2011-04-04T05")),
            makeSegment("test", "2", new Interval("PT1h/2011-04-04T06")),
            makeSegment("test2", "1", new Interval("P1d/2011-04-01")),
            makeSegment("test2", "1", new Interval("P1d/2011-04-02")));
    Collections.sort(segments);

    for (DataSegment segment : segments) {
      writeSegmentToCache(segment);
    }

    checkCache(segments);
    Assert.assertTrue(serverManager.getDataSourceCounts().isEmpty());
    zkCoordinator.start();
    Assert.assertFalse(serverManager.getDataSourceCounts().isEmpty());
    zkCoordinator.stop();

    for (DataSegment segment : segments) {
      deleteSegmentFromCache(segment);
    }

    Assert.assertEquals(0, cacheDir.listFiles().length);
    Assert.assertTrue(cacheDir.delete());
  }
Example 14
  @Override
  public void removeSegment(DataSegment segment) {
    try {
      serverManager.dropSegment(segment);

      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.delete()) {
        log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
      }

      announcer.unannounceSegment(segment);
    } catch (Exception e) {
      log.makeAlert(e, "Failed to remove segment").addData("segment", segment).emit();
    }
  }
Example 15
  /**
   * This is an EXPENSIVE clone. Cloning, though, is the safest thing to do. We can't hand out the
   * original since it can change, and at least the load balancer wants to iterate over this
   * exported list. We need to synchronize on regions since all access to this.servers is under a
   * lock on this.regions.
   *
   * @return A clone of current assignments by table.
   */
  protected Map<TableName, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable() {
    Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
        new HashMap<TableName, Map<ServerName, List<HRegionInfo>>>();
    synchronized (this) {
      if (!server
          .getConfiguration()
          .getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false)) {
        Map<ServerName, List<HRegionInfo>> svrToRegions =
            new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
        for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
          svrToRegions.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
        }
        result.put(TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME), svrToRegions);
      } else {
        for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
          for (HRegionInfo hri : e.getValue()) {
            if (hri.isMetaRegion()) continue;
            TableName tablename = hri.getTable();
            Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
            if (svrToRegions == null) {
              svrToRegions = new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());
              result.put(tablename, svrToRegions);
            }
            List<HRegionInfo> regions = svrToRegions.get(e.getKey());
            if (regions == null) {
              regions = new ArrayList<HRegionInfo>();
              svrToRegions.put(e.getKey(), regions);
            }
            regions.add(hri);
          }
        }
      }
    }

    Map<ServerName, ServerLoad> onlineSvrs = serverManager.getOnlineServers();
    // Take care of servers w/o assignments, and remove servers in draining mode
    List<ServerName> drainingServers = this.serverManager.getDrainingServersList();
    for (Map<ServerName, List<HRegionInfo>> map : result.values()) {
      for (ServerName svr : onlineSvrs.keySet()) {
        if (!map.containsKey(svr)) {
          map.put(svr, new ArrayList<HRegionInfo>());
        }
      }
      map.keySet().removeAll(drainingServers);
    }
    return result;
  }
Example 16
 private void dropQueryable(String dataSource, String version, Interval interval) {
   try {
     serverManager.dropSegment(
         new DataSegment(
             dataSource,
             interval,
             version,
             ImmutableMap.<String, Object>of("version", version, "interval", interval),
             Arrays.asList("dim1", "dim2", "dim3"),
             Arrays.asList("metric1", "metric2"),
             new NoneShardSpec(),
             IndexIO.CURRENT_VERSION_ID,
              123L));
   } catch (SegmentLoadingException e) {
     throw new RuntimeException(e);
   }
 }
Example 17
 /**
  * Start the service. Set up a remote domain controller connection and hand it to the server
  * manager.
  *
  * @param context The start context
  * @throws StartException
  */
 public synchronized void start(final StartContext context) throws StartException {
   InetAddress dcAddress = domainControllerAddress.getValue();
   if (dcAddress.isAnyLocalAddress() || dcAddress.isSiteLocalAddress()) {
     try {
       dcAddress = InetAddress.getLocalHost();
     } catch (UnknownHostException e) {
       throw new StartException("Failed to get domain controller address", e);
     }
   }
   final NetworkInterfaceBinding managementInterface = localManagementInterface.getValue();
   domainControllerConnection =
       new RemoteDomainControllerConnection(
           serverManager.getName(),
           dcAddress,
           domainControllerPort.getValue(),
           managementInterface.getAddress(),
           localManagementPort.getValue(),
           localRepository,
           connectTimeout,
           executorService.getValue(),
           threadFactoryValue.getValue());
 }
Example 18
 /**
  * Compute the average load across all region servers. Currently this uses a very naive
  * computation: it just uses the number of regions being served, ignoring stats about the
  * number of requests.
  *
  * @return the average load
  */
 protected synchronized double getAverageLoad() {
   int numServers = 0, totalLoad = 0;
   for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
     Set<HRegionInfo> regions = e.getValue();
     ServerName serverName = e.getKey();
     int regionCount = regions.size();
     if (serverManager.isServerOnline(serverName)) {
       totalLoad += regionCount;
       numServers++;
     }
   }
   if (numServers > 1) {
     // The master region server holds only a couple regions.
     // Don't consider this server in calculating the average load
     // if there are other region servers to avoid possible confusion.
     Set<HRegionInfo> hris = serverHoldings.get(server.getServerName());
     if (hris != null) {
       totalLoad -= hris.size();
       numServers--;
     }
   }
   return numServers == 0 ? 0.0 : (double) totalLoad / (double) numServers;
 }
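The computation above is just the mean region count over servers that are currently online, with the master's own regions subtracted when other region servers exist. A standalone sketch of the core mean-over-online-servers arithmetic, using hypothetical names and plain Java collections (no HBase types) and omitting the master-exclusion special case:

import java.util.Map;
import java.util.Set;

/** Standalone illustration of the naive average-load arithmetic (hypothetical names). */
final class AverageLoadSketch {
  /** Mean region count across the servers that are currently online. */
  static double averageLoad(Map<String, Integer> regionCountByServer, Set<String> onlineServers) {
    int numServers = 0;
    int totalLoad = 0;
    for (Map.Entry<String, Integer> e : regionCountByServer.entrySet()) {
      if (onlineServers.contains(e.getKey())) {
        totalLoad += e.getValue();
        numServers++;
      }
    }
    return numServers == 0 ? 0.0 : (double) totalLoad / numServers;
  }

  public static void main(String[] args) {
    Map<String, Integer> counts = Map.of("rs1", 10, "rs2", 20, "rs3", 30);
    // Three online servers holding 10, 20 and 30 regions -> average load 20.0
    System.out.println(averageLoad(counts, counts.keySet()));
  }
}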
Example 19
  @Test
  public void testWorkerCrash() throws Exception {
    slm = new SplitLogManager(zkw, conf, stopper, master, DUMMY_MASTER, null);
    slm.finishInitialization();
    TaskBatch batch = new TaskBatch();

    String tasknode = submitTaskAndWait(batch, "foo/1");
    final ServerName worker1 = new ServerName("worker1,1,1");

    SplitLogTask slt = new SplitLogTask.Owned(worker1);
    ZKUtil.setData(zkw, tasknode, slt.toByteArray());
    if (tot_mgr_heartbeat.get() == 0) waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2);

    // Not yet resubmitted.
    Assert.assertEquals(0, tot_mgr_resubmit.get());

    // This server becomes dead
    Mockito.when(sm.isServerOnline(worker1)).thenReturn(false);

    Thread.sleep(1300); // The timeout checker is done every 1000 ms (hardcoded).

    // It has been resubmitted
    Assert.assertEquals(1, tot_mgr_resubmit.get());
  }
Example 20
  private ServerContextInformation initESB(
      String configurationName, ConfigurationContext configurationContext) throws AxisFault {
    ServerConfigurationInformation configurationInformation =
        ServerConfigurationInformationFactory.createServerConfigurationInformation(
            configurationContext.getAxisConfiguration());
    // ability to specify the SynapseServerName as a system property
    if (System.getProperty("SynapseServerName") != null) {
      configurationInformation.setServerName(System.getProperty("SynapseServerName"));
    }

    // for now we override the default configuration location with the value in registry
    String repoPath = configurationContext.getAxisConfiguration().getRepository().getPath();
    configurationInformation.setSynapseXMLLocation(
        repoPath
            + File.separator
            + ServiceBusConstants.SYNAPSE_CONFIGS
            + File.separator
            + configurationName);

    configurationInformation.setCreateNewInstance(false);
    configurationInformation.setServerControllerProvider(CarbonSynapseController.class.getName());
    if (isRunningSamplesMode()) {
      configurationInformation.setSynapseXMLLocation(
          "repository"
              + File.separator
              + "samples"
              + File.separator
              + "synapse_sample_"
              + System.getProperty(ServiceBusConstants.ESB_SAMPLE_SYSTEM_PROPERTY)
              + ".xml");
    }

    ServerManager serverManager = new ServerManager();
    ServerContextInformation contextInfo =
        new ServerContextInformation(configurationContext, configurationInformation);

    /*if (dataSourceInformationRepositoryService != null) {
        DataSourceInformationRepository repository =
                dataSourceInformationRepositoryService.getDataSourceInformationRepository();
        contextInfo.addProperty(DataSourceConstants.DATA_SOURCE_INFORMATION_REPOSITORY,
                repository);
    }*/

    TaskScheduler scheduler;
    if (configurationContext.getProperty(ServiceBusConstants.CARBON_TASK_SCHEDULER) == null) {
      scheduler = new TaskScheduler(TaskConstants.TASK_SCHEDULER);
      configurationContext.setProperty(ServiceBusConstants.CARBON_TASK_SCHEDULER, scheduler);
    } else {
      scheduler =
          (TaskScheduler)
              configurationContext.getProperty(ServiceBusConstants.CARBON_TASK_SCHEDULER);
    }
    contextInfo.addProperty(TaskConstants.TASK_SCHEDULER, scheduler);

    TaskDescriptionRepository repository;
    if (configurationContext.getProperty(ServiceBusConstants.CARBON_TASK_REPOSITORY) == null) {
      repository = new TaskDescriptionRepository();
      configurationContext.setProperty(ServiceBusConstants.CARBON_TASK_REPOSITORY, repository);
    } else {
      repository =
          (TaskDescriptionRepository)
              configurationContext.getProperty(ServiceBusConstants.CARBON_TASK_REPOSITORY);
    }
    contextInfo.addProperty(TaskConstants.TASK_DESCRIPTION_REPOSITORY, repository);

    /* if (secretCallbackHandlerService != null) {
        contextInfo.addProperty(SecurityConstants.PROP_SECRET_CALLBACK_HANDLER,
                secretCallbackHandlerService.getSecretCallbackHandler());
    }*/

    AxisConfiguration axisConf = configurationContext.getAxisConfiguration();
    axisConf.addParameter(
        new Parameter(ServiceBusConstants.SYNAPSE_CURRENT_CONFIGURATION, configurationName));

    serverManager.init(configurationInformation, contextInfo);
    serverManager.start();

    AxisServiceGroup serviceGroup = axisConf.getServiceGroup(SynapseConstants.SYNAPSE_SERVICE_NAME);
    serviceGroup.addParameter("hiddenService", "true");

    addDeployers(configurationContext);

    return contextInfo;
  }
Example 21
 @Override
 protected void onPostExecute(String s) {
   ServerManager.setServerStatus(s);
 }
Example 22
  /**
   * Quits the client nicely.
   *
   * @param reason The quit reason to send
   * @param exitCode This is the exit code that will be returned to the operating system when the
   *     client exits
   */
  public static void quit(final String reason, final int exitCode) {
    ServerManager.getServerManager().disconnectAll(reason);

    System.exit(exitCode);
  }