Example #1
  /* ------------------------------------------------------------ */
  protected void doStart() throws Exception {
    if (_server == null) throw new IllegalStateException("No server");

    // open listener port
    open();

    super.doStart();

    if (_threadPool == null) _threadPool = _server.getThreadPool();
    if (_threadPool != _server.getThreadPool() && (_threadPool instanceof LifeCycle))
      ((LifeCycle) _threadPool).start();

    // Start acceptor threads
    synchronized (this) {
      _acceptorThread = new Thread[getAcceptors()];

      for (int i = 0; i < _acceptorThread.length; i++) {
        if (!_threadPool.dispatch(new Acceptor(i))) {
          Log.warn("insufficient maxThreads configured for {}", this);
          break;
        }
      }
    }

    Log.info("Started {}", this);
  }
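The doStart() above is invoked by Jetty's LifeCycle machinery once the connector has been added to a Server. A minimal sketch of that wiring, assuming the Jetty 6 (org.mortbay) API this example appears to come from; the connector class, port, and class name below are illustrative, not taken from the original:

import org.mortbay.jetty.Server;
import org.mortbay.jetty.nio.SelectChannelConnector;

public class ConnectorStartSketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server();                    // no default connector yet
    SelectChannelConnector connector = new SelectChannelConnector();
    connector.setPort(8080);                         // listener port later opened by open() in doStart()
    server.addConnector(connector);
    server.start();                                  // walks the LifeCycle tree and calls doStart() on the connector
    server.join();
  }
}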
Example #2
  public NodeHeartbeatResponse nodeHeartbeat(
      Map<ApplicationId, List<ContainerStatus>> conts, boolean isHealthy, int resId)
      throws Exception {
    NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
    NodeStatus status = Records.newRecord(NodeStatus.class);
    status.setResponseId(resId);
    status.setNodeId(nodeId);
    // Aggregate the statuses from every application before setting them once;
    // calling setContainersStatuses inside the loop would keep only the last entry.
    List<ContainerStatus> containerStatuses = new ArrayList<ContainerStatus>();
    for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
      Log.info("entry.getValue() " + entry.getValue());
      containerStatuses.addAll(entry.getValue());
    }
    status.setContainersStatuses(containerStatuses);
    NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
    healthStatus.setHealthReport("");
    healthStatus.setIsNodeHealthy(isHealthy);
    healthStatus.setLastHealthReportTime(1);
    status.setNodeHealthStatus(healthStatus);
    req.setNodeStatus(status);
    req.setLastKnownContainerTokenMasterKey(this.currentContainerTokenMasterKey);
    req.setLastKnownNMTokenMasterKey(this.currentNMTokenMasterKey);
    NodeHeartbeatResponse heartbeatResponse = resourceTracker.nodeHeartbeat(req);

    MasterKey masterKeyFromRM = heartbeatResponse.getContainerTokenMasterKey();
    if (masterKeyFromRM != null
        && masterKeyFromRM.getKeyId() != this.currentContainerTokenMasterKey.getKeyId()) {
      this.currentContainerTokenMasterKey = masterKeyFromRM;
    }

    masterKeyFromRM = heartbeatResponse.getNMTokenMasterKey();
    if (masterKeyFromRM != null
        && masterKeyFromRM.getKeyId() != this.currentNMTokenMasterKey.getKeyId()) {
      this.currentNMTokenMasterKey = masterKeyFromRM;
    }

    return heartbeatResponse;
  }
Example #3
  public void fetchOYPData() {
    this.logger.info("[{}]线程开始执行,欧亚盘指数...");
    String year = DateUtils.formatDate(new Date(), "yyyy");
    String content;
    try {
      String url = "http://www.cailele.com/lottery/dcspf/spfhomeAction.php?term=";
      String pContent = HttpUtil.getContentByURL(FetchConstant.CURPERIOD_URL, false);
      Pattern p4 = Pattern.compile(FetchConstant.CUR_PERIOD);
      Matcher m = p4.matcher(pContent);
      if (m.find()) {
        String period =
            year.substring(2, year.length()) + m.group(1).substring(1, m.group(1).length());
        url = "http://www.cailele.com/lottery/dcspf/spfhomeAction.php?term=" + period;
        Log.info("==============【" + url + "】============");
      } else {
        url = FetchConstant.OYP_URL;
      }
      content = HttpUtil.getContentByURL(url, false);

      // ArrayList datas = fiterOYPContent(content);
      // BuildOYPXMLDoc(datas);
      // } catch (JDOMException e) {
      //	e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    }
    this.logger.info("[{}]线程开始执行结束,欧亚盘指数...");
  }
Example #4
  public ArrayList<ArrayList<String>> fiterOYPContent(String content) {
    ArrayList<ArrayList<String>> result = new ArrayList<ArrayList<String>>();
    Matcher m = p2.matcher(content);
    Log.info(content);
    while (m.find()) {
      ArrayList<String> line = new ArrayList<String>();
      String no = m.group(1);
      String oz = m.group(4);
      String oh = m.group(7);
      String ok = m.group(11);

      String yz = m.group(2);
      String yh = m.group(6);
      String yk = m.group(9);
      line.add(no);
      line.add(oz);
      line.add(oh);
      line.add(ok);
      line.add(yz);
      line.add(yh);
      line.add(yk);
      result.add(line);
    }

    return result;
  }
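fiterOYPContent relies on positional regex groups to build one row per match. A self-contained sketch of the same Pattern/Matcher find() loop, using a simpler, purely hypothetical pattern and input for illustration:

import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class GroupLoopSketch {
  public static void main(String[] args) {
    Pattern p = Pattern.compile("(\\d+):(\\d+\\.\\d+),(\\d+\\.\\d+)"); // hypothetical pattern
    Matcher m = p.matcher("1:1.85,1.95 2:2.10,1.70");                  // hypothetical input
    ArrayList<ArrayList<String>> rows = new ArrayList<ArrayList<String>>();
    while (m.find()) {
      ArrayList<String> row = new ArrayList<String>();
      row.add(m.group(1)); // match number
      row.add(m.group(2)); // first value
      row.add(m.group(3)); // second value
      rows.add(row);
    }
    System.out.println(rows); // [[1, 1.85, 1.95], [2, 2.10, 1.70]]
  }
}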
Example #5
  public S2RobotWebServer(final int port, final File docRoot) {
    this.port = port;
    this.docRoot = docRoot;

    server = new Server(port);

    final ResourceHandler resource_handler = new ResourceHandler();
    resource_handler.setWelcomeFiles(new String[] {"index.html"});
    resource_handler.setResourceBase(docRoot.getAbsolutePath());
    Log.info("serving " + resource_handler.getBaseResource());
    final HandlerList handlers = new HandlerList();
    handlers.setHandlers(new Handler[] {resource_handler, new DefaultHandler()});
    server.setHandler(handlers);
  }
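The constructor above only wires up the handlers; nothing is served until the embedded Server is started. A minimal start sketch using the same Jetty handler classes (Jetty 6 org.mortbay package names assumed; the docroot path below is illustrative):

import java.io.File;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.DefaultHandler;
import org.mortbay.jetty.handler.HandlerList;
import org.mortbay.jetty.handler.ResourceHandler;

public class ResourceServerSketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server(8080);
    ResourceHandler resourceHandler = new ResourceHandler();
    resourceHandler.setWelcomeFiles(new String[] {"index.html"});
    resourceHandler.setResourceBase(new File("docroot").getAbsolutePath()); // illustrative path
    HandlerList handlers = new HandlerList();
    handlers.setHandlers(new Handler[] {resourceHandler, new DefaultHandler()});
    server.setHandler(handlers);
    server.start(); // serves http://localhost:8080/index.html from docroot
    server.join();  // block until the server is stopped elsewhere
  }
}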
Example #6
  public NodeHeartbeatResponse nodeHeartbeat(
      ApplicationAttemptId attemptId, long containerId, ContainerState containerState)
      throws Exception {
    HashMap<ApplicationId, List<ContainerStatus>> nodeUpdate =
        new HashMap<ApplicationId, List<ContainerStatus>>(1);
    ContainerStatus containerStatus =
        BuilderUtils.newContainerStatus(
            BuilderUtils.newContainerId(attemptId, containerId), containerState, "Success", 0);
    ArrayList<ContainerStatus> containerStatusList = new ArrayList<ContainerStatus>(1);
    containerStatusList.add(containerStatus);
    Log.info("ContainerStatus: " + containerStatus);
    nodeUpdate.put(attemptId.getApplicationId(), containerStatusList);
    return nodeHeartbeat(nodeUpdate, true);
  }
Example #7
  public static void stopMasterAndAssignMeta(HBaseTestingUtility HTU)
      throws IOException, InterruptedException {
    // Stop master
    HMaster master = HTU.getHBaseCluster().getMaster();
    ServerName masterAddr = master.getServerName();
    master.stopMaster();

    Log.info("Waiting until master thread exits");
    while (HTU.getHBaseCluster().getMasterThread() != null
        && HTU.getHBaseCluster().getMasterThread().isAlive()) {
      Threads.sleep(100);
    }

    HRegionServer.TEST_SKIP_REPORTING_TRANSITION = true;
    // Master is down, so is the meta. We need to assign it somewhere
    // so that regions can be assigned during the mocking phase.
    HRegionServer hrs = HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
    ZooKeeperWatcher zkw = hrs.getZooKeeper();
    MetaTableLocator mtl = new MetaTableLocator();
    ServerName sn = mtl.getMetaRegionLocation(zkw);
    if (sn != null && !masterAddr.equals(sn)) {
      return;
    }

    ProtobufUtil.openRegion(
        hrs.getRSRpcServices(), hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
    while (true) {
      sn = mtl.getMetaRegionLocation(zkw);
      if (sn != null
          && sn.equals(hrs.getServerName())
          && hrs.onlineRegions.containsKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
        break;
      }
      Thread.sleep(100);
    }
  }
Example #8
  @Override
  public int run(String[] args) throws Exception {
    // Validate params etc
    JCommander jComm = new JCommander(this);
    jComm.setProgramName("Splout Page Counts example");
    try {
      jComm.parse(args);
    } catch (ParameterException e) {
      System.err.println(e.getMessage());
      jComm.usage();
      System.exit(-1);
    }

    boolean generate = !noGenerate; // just for clarity

    if (generateTupleFiles && deploy) {
      System.err.println("Can't run a 'dry' TupleFile generation and deploy it.");
      jComm.usage();
      System.exit(-1);
    }

    Path outPath = new Path(outputPath);
    FileSystem outFs = outPath.getFileSystem(getConf());

    if (!FileSystem.getLocal(conf).equals(FileSystem.get(conf))) {
      File nativeLibs = new File("native");
      if (nativeLibs.exists()) {
        SploutHadoopConfiguration.addSQLite4JavaNativeLibsToDC(conf);
      }
    }

    if (generate) {
      Path inputPath = new Path(this.inputPath);
      FileSystem inputFileSystem = inputPath.getFileSystem(conf);

      FileStatus[] fileStatuses = inputFileSystem.listStatus(inputPath);

      // define the schema that the resultant table will have: date, hour, pagename, pageviews
      final Schema tableSchema =
          new Schema(
              "pagecounts",
              Fields.parse("date:string, hour:string, pagename:string, pageviews:int"));
      // define the schema of the input files: projectcode, pagename, pageviews, bytes
      Schema fileSchema =
          new Schema(
              "pagecountsfile",
              Fields.parse("projectcode:string, pagename:string, pageviews:int, bytes:long"));

      // instantiate a TableBuilder
      TableBuilder tableBuilder = new TableBuilder(tableSchema);

      // for every input file...
      for (FileStatus fileStatus : fileStatuses) {
        String fileName = fileStatus.getPath().getName().toString();
        // strip the date and the hour from the file name
        String fileDate = fileName.split("-")[1];
        String fileHour = fileName.split("-")[2].substring(0, 2);
        // instantiate a custom RecordProcessor to process the records of this file
        PageCountsRecordProcessor recordProcessor =
            new PageCountsRecordProcessor(tableSchema, fileDate, fileHour);
        // use the tableBuilder method for adding each of the files to the mix
        tableBuilder.addCSVTextFile(
            fileStatus.getPath(),
            ' ',
            TupleTextInputFormat.NO_QUOTE_CHARACTER,
            TupleTextInputFormat.NO_ESCAPE_CHARACTER,
            false,
            false,
            TupleTextInputFormat.NO_NULL_STRING,
            fileSchema,
            recordProcessor);
      }

      // partition the dataset by pagename - which should give a fairly even distribution.
      tableBuilder.partitionBy("pagename");
      // create a compound index on pagename, date so that typical queries for the dataset will be fast
      tableBuilder.createIndex("pagename", "date");

      long nonExactPageSize = memoryForIndexing / 32000; // approximate page size in bytes, given 32000 pages
      int pageSize = (int) Math.pow(2, (int) Math.round(Math.log(nonExactPageSize) / Math.log(2)));
      Log.info(
          "Pagesize = "
              + pageSize
              + " as memory for indexing was ["
              + memoryForIndexing
              + "] and there are 32000 pages.");

      tableBuilder.initialSQL("pragma page_size=" + pageSize);
      // insertion order is very important for optimizing query speed because it makes data be
      // co-located in disk
      tableBuilder.insertionSortOrder(OrderBy.parse("pagename:asc, date:asc"));

      // instantiate a TablespaceBuilder
      TablespaceBuilder tablespaceBuilder = new TablespaceBuilder();

      // we will partition this dataset in as many partitions as:
      tablespaceBuilder.setNPartitions(nPartitions);
      tablespaceBuilder.add(tableBuilder.build());
      // we turn a specific SQLite pragma on for making autocomplete queries fast
      tablespaceBuilder.initStatements("pragma case_sensitive_like=true;");

      HadoopUtils.deleteIfExists(outFs, outPath);

      // finally, instantiate a TablespaceGenerator and execute it
      TablespaceGenerator tablespaceViewBuilder;

      if (generateTupleFiles) {
        // we subclass TablespaceGenerator to be able to run the generation without outputting
        // the SQLite stores, for benchmark comparisons.
        // In the future this feature may be useful in general for debugging store creation.
        tablespaceViewBuilder =
            new TablespaceGenerator(tablespaceBuilder.build(), outPath) {

              @Override
              public void generateView(
                  Configuration conf, SamplingType samplingType, SamplingOptions samplingOptions)
                  throws Exception {

                prepareOutput(conf);
                final int nPartitions = tablespace.getnPartitions();
                if (nPartitions > 1) {
                  partitionMap = sample(nPartitions, conf, samplingType, samplingOptions);
                } else {
                  partitionMap = PartitionMap.oneShardOpenedMap();
                }
                writeOutputMetadata(conf);

                TupleMRBuilder builder = createMRBuilder(nPartitions, conf);
                // Set a TupleOutput here instead of SQLiteOutput
                builder.setOutput(
                    new Path(outputPath, OUT_STORE),
                    new TupleOutputFormat(tableSchema),
                    ITuple.class,
                    NullWritable.class);
                Job job = builder.createJob();
                executeViewGeneration(job);
              }
            };
      } else {
        // ... otherwise a standard TablespaceGenerator is used.
        tablespaceViewBuilder = new TablespaceGenerator(tablespaceBuilder.build(), outPath);
      }

      tablespaceViewBuilder.generateView(
          getConf(), SamplingType.RESERVOIR, new TupleSampler.DefaultSamplingOptions());
    }

    if (deploy) {
      // use StoreDeployerTool for deploying the already generated dataset
      StoreDeployerTool deployer = new StoreDeployerTool(qnode, getConf());
      ArrayList<TablespaceDepSpec> deployments = new ArrayList<TablespaceDepSpec>();
      deployments.add(new TablespaceDepSpec("pagecounts", outPath.toString(), repFactor, null));
      deployer.deploy(deployments);
    }
    return 1;
  }
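The page_size pragma above is derived by rounding memoryForIndexing / 32000 to the nearest power of two. A small worked sketch of that arithmetic, with a purely illustrative memory figure:

public class PageSizeSketch {
  public static void main(String[] args) {
    long memoryForIndexing = 100 * 1000 * 1000L; // hypothetical: ~100 MB reserved for indexing
    long nonExactPageSize = memoryForIndexing / 32000; // 3125 bytes per page across 32000 pages
    int pageSize = (int) Math.pow(2, (int) Math.round(Math.log(nonExactPageSize) / Math.log(2)));
    System.out.println(pageSize); // 4096 -> nearest power of two, a valid SQLite page_size
  }
}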
Example #9
  public void log(Object obj, String message) {
    // obj is not used; only the message is forwarded to the shared Log
    Log.info(message);
  }
Example #10
  @Test(timeout = 300000)
  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_MASTERS = 1;
    final int NUM_RS = 3;
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);

    try {
      final byte[] TABLENAME = Bytes.toBytes("testDataCorrectnessReplayingRecoveredEdits");
      final byte[] FAMILY = Bytes.toBytes("family");
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();

      // Create table
      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
      desc.addFamily(new HColumnDescriptor(FAMILY));
      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
      hbaseAdmin.createTable(desc);

      assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));

      // Put data: r1->v1
      Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
      putDataAndVerify(table, "r1", FAMILY, "v1", 1);

      // Move region to target server
      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
      HRegionServer originServer = cluster.getRegionServer(originServerNum);
      int targetServerNum = (originServerNum + 1) % NUM_RS;
      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
      assertFalse(originServer.equals(targetServer));
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(targetServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

      // Put data: r2->v2
      Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r2", FAMILY, "v2", 2);

      // Move region to origin server
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(originServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

      // Put data: r3->v3
      Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r3", FAMILY, "v3", 3);

      // Kill target server
      Log.info("Killing target server " + targetServer.getServerName());
      targetServer.kill();
      cluster.getRegionServerThreads().get(targetServerNum).join();
      // Wait until finish processing of shutdown
      while (master.getServerManager().areDeadServersInProgress()) {
        Thread.sleep(5);
      }
      // Kill origin server
      Log.info("Killing origin server " + targetServer.getServerName());
      originServer.kill();
      cluster.getRegionServerThreads().get(originServerNum).join();

      // Put data: r4->v4
      Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r4", FAMILY, "v4", 4);

    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }