Example #1
 public static void addLogEntry(Credentials credentials, LogEntry entry, ZooLock zooLock) {
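   // the root tablet's write-ahead log entries are tracked in ZooKeeper; every other tablet records them in the metadata table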
   if (entry.extent.isRootTablet()) {
     String root = getZookeeperLogLocation();
     while (true) {
       try {
         IZooReaderWriter zoo = ZooReaderWriter.getInstance();
         if (zoo.isLockHeld(zooLock.getLockID())) {
           String[] parts = entry.filename.split("/");
           String uniqueId = parts[parts.length - 1];
           zoo.putPersistentData(
               root + "/" + uniqueId, entry.toBytes(), NodeExistsPolicy.OVERWRITE);
         }
         break;
       } catch (KeeperException e) {
         log.error(e, e);
       } catch (InterruptedException e) {
         log.error(e, e);
       } catch (IOException e) {
         log.error(e, e);
       }
       UtilWaitThread.sleep(1000);
     }
   } else {
     Mutation m = new Mutation(entry.getRow());
     m.put(entry.getColumnFamily(), entry.getColumnQualifier(), entry.getValue());
     update(credentials, zooLock, m, entry.extent);
   }
 }
Example #2
 public static <T> T execute(
     Instance instance, ClientExecReturn<T, MasterClientService.Client> exec)
     throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
   MasterClientService.Client client = null;
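   // loop until the request succeeds; transport-level failures are retried after a short sleep, all other failures are rethrown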
   while (true) {
     try {
       client = getConnectionWithRetry(instance);
       return exec.execute(client);
     } catch (TTransportException tte) {
       log.debug("MasterClient request failed, retrying ... ", tte);
       UtilWaitThread.sleep(100);
     } catch (ThriftSecurityException e) {
       throw new AccumuloSecurityException(e.user, e.code, e);
     } catch (AccumuloException e) {
       throw e;
     } catch (ThriftTableOperationException e) {
       switch (e.getType()) {
         case NAMESPACE_NOTFOUND:
           throw new TableNotFoundException(e.getTableName(), new NamespaceNotFoundException(e));
         case NOTFOUND:
           throw new TableNotFoundException(e);
         default:
           throw new AccumuloException(e);
       }
     } catch (Exception e) {
       throw new AccumuloException(e);
     } finally {
       if (client != null) close(client);
     }
   }
 }
Example #3
 public static void removeUnusedWALEntries(
     KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
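   // as in addLogEntry, the root tablet's entries are removed from ZooKeeper; other tablets get metadata table deletes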
   if (extent.isRootTablet()) {
     for (LogEntry entry : logEntries) {
       String root = getZookeeperLogLocation();
       while (true) {
         try {
           IZooReaderWriter zoo = ZooReaderWriter.getInstance();
           if (zoo.isLockHeld(zooLock.getLockID()))
             zoo.recursiveDelete(root + "/" + entry.filename, NodeMissingPolicy.SKIP);
           break;
         } catch (Exception e) {
           log.error(e, e);
         }
         UtilWaitThread.sleep(1000);
       }
     }
   } else {
     Mutation m = new Mutation(extent.getMetadataEntry());
     for (LogEntry entry : logEntries) {
       m.putDelete(LogColumnFamily.NAME, new Text(entry.toString()));
     }
     update(SystemCredentials.get(), zooLock, m, extent);
   }
 }
Example #4
 @Test(timeout = 5 * 60 * 1000)
 public void testConcurrentAssignmentPerformance() throws Exception {
   // make a table with a lot of splits
   String tableName = getUniqueNames(1)[0];
   Connector c = getConnector();
   c.tableOperations().create(tableName);
   SortedSet<Text> splits = new TreeSet<Text>();
   for (int i = 0; i < 4000; i++) {
     splits.add(new Text(randomHex(8)));
   }
   c.tableOperations().addSplits(tableName, splits);
   c.tableOperations().offline(tableName, true);
   // time how long it takes to load
   long now = System.currentTimeMillis();
   c.tableOperations().online(tableName, true);
   long diff = System.currentTimeMillis() - now;
   log.debug("Loaded " + splits.size() + " tablets in " + diff + " ms");
   c.instanceOperations().setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT.getKey(), "20");
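   // with more concurrent assignments allowed, bringing the table back online should take less time (asserted below)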
   now = System.currentTimeMillis();
   c.tableOperations().offline(tableName, true);
   // wait >10 seconds for thread pool to update
   UtilWaitThread.sleep(Math.max(0, now + 11 * 1000 - System.currentTimeMillis()));
   now = System.currentTimeMillis();
   c.tableOperations().online(tableName, true);
   long diff2 = System.currentTimeMillis() - now;
   log.debug("Loaded " + splits.size() + " tablets in " + diff2 + " ms");
   assertTrue(diff2 < diff);
 }
Example #5
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    ScannerOpts scanOpts = new ScannerOpts();
    opts.parseArgs(ContinuousScanner.class.getName(), args, scanOpts);

    Random r = new Random();

    long distance = 1000000000000L;

    Connector conn = opts.getConnector();
    Authorizations auths = opts.randomAuths.getAuths(r);
    Scanner scanner = ContinuousUtil.createScanner(conn, opts.getTableName(), auths);
    scanner.setBatchSize(scanOpts.scanBatchSize);

    double delta = Math.min(.05, .05 / (opts.numToScan / 1000.0));
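    // delta is the tolerated fractional deviation from numToScan before the scan distance is adjusted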

    while (true) {
      long startRow = ContinuousIngest.genLong(opts.min, opts.max - distance, r);
      byte[] scanStart = ContinuousIngest.genRow(startRow);
      byte[] scanStop = ContinuousIngest.genRow(startRow + distance);

      scanner.setRange(new Range(new Text(scanStart), new Text(scanStop)));

      int count = 0;
      Iterator<Entry<Key, Value>> iter = scanner.iterator();

      long t1 = System.currentTimeMillis();

      while (iter.hasNext()) {
        Entry<Key, Value> entry = iter.next();
        ContinuousWalk.validate(entry.getKey(), entry.getValue());
        count++;
      }

      long t2 = System.currentTimeMillis();

      // System.out.println("P1 " +count +" "+((1-delta) * numToScan)+" "+((1+delta) * numToScan)+"
      // "+numToScan);

      if (count < (1 - delta) * opts.numToScan || count > (1 + delta) * opts.numToScan) {
        if (count == 0) {
          distance = distance * 10;
          if (distance < 0) distance = 1000000000000L;
        } else {
          double ratio = (double) opts.numToScan / count;
          // move ratio closer to 1 to make change slower
          ratio = ratio - (ratio - 1.0) * (2.0 / 3.0);
          distance = (long) (ratio * distance);
        }

        // System.out.println("P2 "+delta +" "+numToScan+" "+distance+"  "+((double)numToScan/count
        // ));
      }

      System.out.printf("SCN %d %s %d %d%n", t1, new String(scanStart, UTF_8), (t2 - t1), count);

      if (opts.sleepTime > 0) UtilWaitThread.sleep(opts.sleepTime);
    }
  }
Example #6
  /**
   * Start a server, at the given port, or higher, if that port is not available.
   *
   * @param portHintProperty the port to attempt to open, can be zero, meaning "any available port"
   * @param processor the service to be started
   * @param serverName the name of the class that is providing the service
   * @param threadName name this service's thread for better debugging
   * @param portSearchProperty whether to search for an available port if the hinted port is in use
   * @param minThreadProperty the minimum number of threads for the server's thread pool
   * @param timeBetweenThreadChecksProperty how often to check whether the thread pool should grow
   * @return the server object created, and the port actually used
   * @throws UnknownHostException when we don't know our own address
   */
  public static ServerAddress startServer(
      AccumuloConfiguration conf,
      String address,
      Property portHintProperty,
      TProcessor processor,
      String serverName,
      String threadName,
      Property portSearchProperty,
      Property minThreadProperty,
      Property timeBetweenThreadChecksProperty,
      Property maxMessageSizeProperty)
      throws UnknownHostException {
    int portHint = conf.getPort(portHintProperty);
    int minThreads = 2;
    if (minThreadProperty != null) minThreads = conf.getCount(minThreadProperty);
    long timeBetweenThreadChecks = 1000;
    if (timeBetweenThreadChecksProperty != null)
      timeBetweenThreadChecks = conf.getTimeInMillis(timeBetweenThreadChecksProperty);
    long maxMessageSize = 10 * 1000 * 1000;
    if (maxMessageSizeProperty != null)
      maxMessageSize = conf.getMemoryInBytes(maxMessageSizeProperty);
    boolean portSearch = false;
    if (portSearchProperty != null) portSearch = conf.getBoolean(portSearchProperty);
    // create the TimedProcessor outside the port search loop so we don't try to register the same
    // metrics mbean more than once
    TServerUtils.TimedProcessor timedProcessor =
        new TServerUtils.TimedProcessor(processor, serverName, threadName);
    Random random = new Random();
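    // make up to 100 passes over the candidate port(s) before giving up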
    for (int j = 0; j < 100; j++) {

      // Are we going to slide around, looking for an open port?
      int portsToSearch = 1;
      if (portSearch) portsToSearch = 1000;

      for (int i = 0; i < portsToSearch; i++) {
        int port = portHint + i;
        if (portHint == 0) port = 1024 + random.nextInt(65535 - 1024);
        if (port > 65535) port = 1024 + port % (65535 - 1024);
        try {
          InetSocketAddress addr = new InetSocketAddress(address, port);
          return TServerUtils.startTServer(
              addr,
              timedProcessor,
              serverName,
              threadName,
              minThreads,
              timeBetweenThreadChecks,
              maxMessageSize);
        } catch (Exception ex) {
          log.info("Unable to use port " + port + ", retrying. (Thread Name = " + threadName + ")");
          UtilWaitThread.sleep(250);
        }
      }
    }
    throw new UnknownHostException("Unable to find a listen port");
  }
Example #7
  public static MasterClientService.Client getConnectionWithRetry(Instance instance) {
    checkArgument(instance != null, "instance is null");

    while (true) {

      MasterClientService.Client result = getConnection(instance);
      if (result != null) return result;
      UtilWaitThread.sleep(250);
    }
  }
Example #8
  /**
   * Get the monitor lock in ZooKeeper
   *
   * @throws KeeperException
   * @throws InterruptedException
   */
  private void getMonitorLock() throws KeeperException, InterruptedException {
    final String zRoot = ZooUtil.getRoot(instance);
    final String monitorPath = zRoot + Constants.ZMONITOR;
    final String monitorLockPath = zRoot + Constants.ZMONITOR_LOCK;

    // Ensure that everything is kosher with ZK as this has changed.
    ZooReaderWriter zoo = ZooReaderWriter.getInstance();
    if (zoo.exists(monitorPath)) {
      byte[] data = zoo.getData(monitorPath, null);
      // If the node isn't empty, it's from a previous install (has hostname:port for HTTP server)
      if (0 != data.length) {
        // Recursively delete from that parent node
        zoo.recursiveDelete(monitorPath, NodeMissingPolicy.SKIP);

        // And then make the nodes that we expect for the incoming ephemeral nodes
        zoo.putPersistentData(monitorPath, new byte[0], NodeExistsPolicy.FAIL);
        zoo.putPersistentData(monitorLockPath, new byte[0], NodeExistsPolicy.FAIL);
      } else if (!zoo.exists(monitorLockPath)) {
        // monitor node in ZK exists and is empty as we expect
        // but the monitor/lock node does not
        zoo.putPersistentData(monitorLockPath, new byte[0], NodeExistsPolicy.FAIL);
      }
    } else {
      // 1.5.0 and earlier
      zoo.putPersistentData(zRoot + Constants.ZMONITOR, new byte[0], NodeExistsPolicy.FAIL);
      if (!zoo.exists(monitorLockPath)) {
        // Somehow the monitor node exists but not monitor/lock
        zoo.putPersistentData(monitorLockPath, new byte[0], NodeExistsPolicy.FAIL);
      }
    }

    // Get a ZooLock for the monitor
    while (true) {
      MoniterLockWatcher monitorLockWatcher = new MoniterLockWatcher();
      monitorLock = new ZooLock(monitorLockPath);
      monitorLock.lockAsync(monitorLockWatcher, new byte[0]);

      monitorLockWatcher.waitForChange();

      if (monitorLockWatcher.acquiredLock) {
        break;
      }

      if (!monitorLockWatcher.failedToAcquireLock) {
        throw new IllegalStateException("monitor lock in unknown state");
      }

      monitorLock.tryToCancelAsyncLockOrUnlock();

      UtilWaitThread.sleep(
          getSystemConfiguration().getTimeInMillis(Property.MONITOR_LOCK_CHECK_INTERVAL));
    }

    log.info("Got Monitor lock.");
  }
Example #9
 @Test
 public void interleaveSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
   ReadWriteIT.interleaveTest(c);
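   // give the tablet servers a few seconds to split the table past the 10K threshold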
   UtilWaitThread.sleep(5 * 1000);
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #10
 @Test
 public void deleteSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   DeleteIT.deleteTest(c, cluster);
   c.tableOperations().flush("test_ingest", null, null, true);
   for (int i = 0; i < 5; i++) {
     UtilWaitThread.sleep(10 * 1000);
     if (c.tableOperations().listSplits("test_ingest").size() > 20) break;
   }
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #11
 /** Write the given Mutation to the replication table. */
 protected static void update(Credentials credentials, Mutation m, KeyExtent extent) {
   Writer t = getWriter(credentials);
   while (true) {
     try {
       t.update(m);
       return;
     } catch (AccumuloException e) {
       log.error(e.toString(), e);
     } catch (AccumuloSecurityException e) {
       log.error(e.toString(), e);
     } catch (ConstraintViolationException e) {
       log.error(e.toString(), e);
     } catch (TableNotFoundException e) {
       log.error(e.toString(), e);
     }
     UtilWaitThread.sleep(1000);
   }
 }
Example #12
 @Test
 public void tabletShouldSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
   TestIngest.Opts opts = new TestIngest.Opts();
   opts.rows = 100000;
   TestIngest.ingest(c, opts, new BatchWriterOpts());
   VerifyIngest.Opts vopts = new VerifyIngest.Opts();
   vopts.rows = opts.rows;
   VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
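   // wait for the tablet server to notice the 256K split threshold and split the tablet into many tablets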
   UtilWaitThread.sleep(15 * 1000);
   String id = c.tableOperations().tableIdMap().get("test_ingest");
   Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
   KeyExtent extent = new KeyExtent(new Text(id), null, null);
   s.setRange(extent.toMetadataRange());
   MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
   int count = 0;
   int shortened = 0;
   for (Entry<Key, Value> entry : s) {
     extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
     if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14) shortened++;
     count++;
   }
   assertTrue(shortened > 0);
   assertTrue(count > 10);
   assertEquals(
       0,
       cluster
           .exec(
               CheckForMetadataProblems.class,
               "-i",
               cluster.getInstanceName(),
               "-u",
               "root",
               "-p",
               ROOT_PASSWORD,
               "-z",
               cluster.getZooKeepers())
           .waitFor());
 }
Example #13
 public static void update(
     Credentials credentials, ZooLock zooLock, Mutation m, KeyExtent extent) {
   Writer t = extent.isMeta() ? getRootTable(credentials) : getMetadataTable(credentials);
   if (zooLock != null) putLockID(zooLock, m);
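   // retry the metadata update until it succeeds; failures are logged and retried after a one second sleep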
   while (true) {
     try {
       t.update(m);
       return;
     } catch (AccumuloException e) {
       log.error(e, e);
     } catch (AccumuloSecurityException e) {
       log.error(e, e);
     } catch (ConstraintViolationException e) {
       log.error(e, e);
     } catch (TableNotFoundException e) {
       log.error(e, e);
     }
     UtilWaitThread.sleep(1000);
   }
 }
Example #14
  private static void setupLocalityGroups(
      Connector conn, int numlg, ArrayList<byte[]> cfset, String table)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    if (numlg > 1) {
      int numCF = cfset.size() / numlg;
      int gNum = 0;

      Iterator<byte[]> cfiter = cfset.iterator();
      Map<String, Set<Text>> groups = new HashMap<String, Set<Text>>();
      while (cfiter.hasNext()) {
        HashSet<Text> groupCols = new HashSet<Text>();
        for (int i = 0; i < numCF && cfiter.hasNext(); i++) {
          groupCols.add(new Text(cfiter.next()));
        }

        groups.put("lg" + (gNum++), groupCols);
      }

      conn.tableOperations().setLocalityGroups(table, groups);
      conn.tableOperations().offline(table);
      UtilWaitThread.sleep(1000);
      conn.tableOperations().online(table);
    }
  }
Example #15
  private int write(Collection<CommitSession> sessions, boolean mincFinish, Writer writer)
      throws IOException {
    // Work very hard not to lock this during calls to the outside world
    int currentLogSet = logSetId.get();
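    // remember which log set we started with; if it changes while we are writing, the write must be retried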

    int seq = -1;

    int attempt = 0;
    boolean success = false;
    while (!success) {
      try {
        // get a reference to the loggers that no other thread can touch
        ArrayList<DfsLogger> copy = new ArrayList<DfsLogger>();
        currentLogSet = initializeLoggers(copy);

        // add the logger to the log set for the memory in the tablet,
        // update the metadata table if we've never used this tablet

        if (currentLogSet == logSetId.get()) {
          for (CommitSession commitSession : sessions) {
            if (commitSession.beginUpdatingLogsUsed(copy, mincFinish)) {
              try {
                // Scribble out a tablet definition and then write to the metadata table
                defineTablet(commitSession);
                if (currentLogSet == logSetId.get())
                  tserver.addLoggersToMetadata(
                      copy, commitSession.getExtent(), commitSession.getLogId());
              } finally {
                commitSession.finishUpdatingLogsUsed();
              }
            }
          }
        }

        // Make sure that the logs haven't changed out from underneath our copy
        if (currentLogSet == logSetId.get()) {

          // write the mutation to the logs
          seq = seqGen.incrementAndGet();
          if (seq < 0)
            throw new RuntimeException("Logger sequence generator wrapped!  Onos!!!11!eleven");
          ArrayList<LoggerOperation> queuedOperations = new ArrayList<LoggerOperation>(copy.size());
          for (DfsLogger wal : copy) {
            LoggerOperation lop = writer.write(wal, seq);
            if (lop != null) queuedOperations.add(lop);
          }

          for (LoggerOperation lop : queuedOperations) {
            lop.await();
          }

          // double-check: did the log set change?
          success = (currentLogSet == logSetId.get());
        }
      } catch (DfsLogger.LogClosedException ex) {
        log.debug("Logs closed while writing, retrying " + (attempt + 1));
      } catch (Exception t) {
        log.error("Unexpected error writing to log, retrying attempt " + (attempt + 1), t);
        UtilWaitThread.sleep(100);
      } finally {
        attempt++;
      }
      // Some sort of write failure occurred. Grab the write lock and reset the logs.
      // But since multiple threads will attempt it, only attempt the reset when
      // the logs haven't changed.
      final int finalCurrent = currentLogSet;
      if (!success) {
        testLockAndRun(
            logSetLock,
            new TestCallWithWriteLock() {

              @Override
              boolean test() {
                return finalCurrent == logSetId.get();
              }

              @Override
              void withWriteLock() throws IOException {
                close();
              }
            });
      }
    }
    // if the log gets too big, reset it .. grab the write lock first
    logSizeEstimate.addAndGet(4 * 3); // event, tid, seq overhead
    testLockAndRun(
        logSetLock,
        new TestCallWithWriteLock() {
          boolean test() {
            return logSizeEstimate.get() > maxSize;
          }

          void withWriteLock() throws IOException {
            close();
          }
        });
    return seq;
  }
Example #16
  public static void cloneTable(
      Instance instance, String srcTableId, String tableId, VolumeManager volumeManager)
      throws Exception {

    Connector conn =
        instance.getConnector(
            SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
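    // copy the source table's metadata; if tablets are merged away during the copy, delete the partial clone and start over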

    while (true) {

      try {
        initializeClone(srcTableId, tableId, conn, bw);

        // the following loop looks for changes in the files that occurred during the copy; if
        // files were dereferenced then they could have been GCed

        while (true) {
          int rewrites = checkClone(srcTableId, tableId, conn, bw);

          if (rewrites == 0) break;
        }

        bw.flush();
        break;

      } catch (TabletIterator.TabletDeletedException tde) {
        // tablets were merged in the src table
        bw.flush();

        // delete what we have cloned and try again
        deleteTable(tableId, false, SystemCredentials.get(), null);

        log.debug(
            "Tablets merged in table " + srcTableId + " while attempting to clone, trying again");

        UtilWaitThread.sleep(100);
      }
    }

    // delete the clone markers and create directory entries
    Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);

    int dirCount = 0;

    for (Entry<Key, Value> entry : mscanner) {
      Key k = entry.getKey();
      Mutation m = new Mutation(k.getRow());
      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
      String dir =
          volumeManager.choose(ServerConstants.getTablesDirs())
              + "/"
              + tableId
              + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes()));
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes()));
      bw.addMutation(m);
    }

    bw.close();
  }
Example #17
  public TraceServer(ServerConfiguration serverConfiguration, String hostname) throws Exception {
    this.serverConfiguration = serverConfiguration;
    AccumuloConfiguration conf = serverConfiguration.getConfiguration();
    table = conf.get(Property.TRACE_TABLE);
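    // keep retrying until the trace user can connect and the trace table exists with its age-off iterator and formatter configured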
    while (true) {
      try {
        String principal = conf.get(Property.TRACE_USER);
        AuthenticationToken at;
        Map<String, String> loginMap =
            conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX);
        if (loginMap.isEmpty()) {
          Property p = Property.TRACE_PASSWORD;
          at = new PasswordToken(conf.get(p).getBytes());
        } else {
          Properties props = new Properties();
          AuthenticationToken token =
              AccumuloClassLoader.getClassLoader()
                  .loadClass(conf.get(Property.TRACE_TOKEN_TYPE))
                  .asSubclass(AuthenticationToken.class)
                  .newInstance();

          int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length() + 1;
          for (Entry<String, String> entry : loginMap.entrySet()) {
            props.put(entry.getKey().substring(prefixLength), entry.getValue());
          }

          token.init(props);

          at = token;
        }

        connector = serverConfiguration.getInstance().getConnector(principal, at);
        if (!connector.tableOperations().exists(table)) {
          connector.tableOperations().create(table);
          IteratorSetting setting = new IteratorSetting(10, "ageoff", AgeOffFilter.class.getName());
          AgeOffFilter.setTTL(setting, 7 * 24 * 60 * 60 * 1000L);
          connector.tableOperations().attachIterator(table, setting);
        }
        connector
            .tableOperations()
            .setProperty(
                table, Property.TABLE_FORMATTER_CLASS.getKey(), TraceFormatter.class.getName());
        break;
      } catch (Exception ex) {
        log.info("Waiting to checking/create the trace table.", ex);
        UtilWaitThread.sleep(1000);
      }
    }

    int port = conf.getPort(Property.TRACE_PORT);
    final ServerSocket sock = ServerSocketChannel.open().socket();
    sock.setReuseAddress(true);
    sock.bind(new InetSocketAddress(hostname, port));
    final TServerTransport transport = new TServerSocket(sock);
    TThreadPoolServer.Args options = new TThreadPoolServer.Args(transport);
    options.processor(new Processor<Iface>(new Receiver()));
    server = new TThreadPoolServer(options);
    registerInZooKeeper(sock.getInetAddress().getHostAddress() + ":" + sock.getLocalPort());
    writer =
        connector.createBatchWriter(
            table, new BatchWriterConfig().setMaxLatency(5, TimeUnit.SECONDS));
  }
Example #18
  public static void fetchData() {
    double totalIngestRate = 0.;
    double totalIngestByteRate = 0.;
    double totalQueryRate = 0.;
    double totalQueryByteRate = 0.;
    double totalScanRate = 0.;
    long totalEntries = 0;
    int totalTabletCount = 0;
    int onlineTabletCount = 0;
    long totalHoldTime = 0;
    long totalLookups = 0;
    boolean retry = true;

    // only recalc every so often
    long currentTime = System.currentTimeMillis();
    if (currentTime - lastRecalc < REFRESH_TIME * 1000) return;

    synchronized (Monitor.class) {
      if (fetching) return;
      fetching = true;
    }

    try {
      while (retry) {
        MasterClientService.Iface client = null;
        try {
          client = MasterClient.getConnection(HdfsZooInstance.getInstance());
          if (client != null) {
            mmi =
                client.getMasterStats(
                    Tracer.traceInfo(),
                    SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()));
            retry = false;
          } else {
            mmi = null;
          }
          Monitor.gcStatus = fetchGcStatus();
        } catch (Exception e) {
          mmi = null;
          log.info("Error fetching stats: " + e);
        } finally {
          if (client != null) {
            MasterClient.close(client);
          }
        }
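        // stats were unavailable; pause before asking the master again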
        if (mmi == null) UtilWaitThread.sleep(1000);
      }
      if (mmi != null) {
        int majorCompactions = 0;
        int minorCompactions = 0;

        lookupRateTracker.startingUpdates();
        indexCacheHitTracker.startingUpdates();
        indexCacheRequestTracker.startingUpdates();
        dataCacheHitTracker.startingUpdates();
        dataCacheRequestTracker.startingUpdates();

        for (TabletServerStatus server : mmi.tServerInfo) {
          TableInfo summary = TableInfoUtil.summarizeTableStats(server);
          totalIngestRate += summary.ingestRate;
          totalIngestByteRate += summary.ingestByteRate;
          totalQueryRate += summary.queryRate;
          totalScanRate += summary.scanRate;
          totalQueryByteRate += summary.queryByteRate;
          totalEntries += summary.recs;
          totalHoldTime += server.holdTime;
          totalLookups += server.lookups;
          majorCompactions += summary.majors.running;
          minorCompactions += summary.minors.running;
          lookupRateTracker.updateTabletServer(server.name, server.lastContact, server.lookups);
          indexCacheHitTracker.updateTabletServer(
              server.name, server.lastContact, server.indexCacheHits);
          indexCacheRequestTracker.updateTabletServer(
              server.name, server.lastContact, server.indexCacheRequest);
          dataCacheHitTracker.updateTabletServer(
              server.name, server.lastContact, server.dataCacheHits);
          dataCacheRequestTracker.updateTabletServer(
              server.name, server.lastContact, server.dataCacheRequest);
        }

        lookupRateTracker.finishedUpdating();
        indexCacheHitTracker.finishedUpdating();
        indexCacheRequestTracker.finishedUpdating();
        dataCacheHitTracker.finishedUpdating();
        dataCacheRequestTracker.finishedUpdating();

        int totalTables = 0;
        for (TableInfo tInfo : mmi.tableMap.values()) {
          totalTabletCount += tInfo.tablets;
          onlineTabletCount += tInfo.onlineTablets;
          totalTables++;
        }
        Monitor.totalIngestRate = totalIngestRate;
        Monitor.totalTables = totalTables;
        totalIngestByteRate = totalIngestByteRate / 1000000.0;
        Monitor.totalIngestByteRate = totalIngestByteRate;
        Monitor.totalQueryRate = totalQueryRate;
        Monitor.totalScanRate = totalScanRate;
        totalQueryByteRate = totalQueryByteRate / 1000000.0;
        Monitor.totalQueryByteRate = totalQueryByteRate;
        Monitor.totalEntries = totalEntries;
        Monitor.totalTabletCount = totalTabletCount;
        Monitor.onlineTabletCount = onlineTabletCount;
        Monitor.totalHoldTime = totalHoldTime;
        Monitor.totalLookups = totalLookups;

        ingestRateOverTime.add(new Pair<Long, Double>(currentTime, totalIngestRate));
        ingestByteRateOverTime.add(new Pair<Long, Double>(currentTime, totalIngestByteRate));

        double totalLoad = 0.;
        for (TabletServerStatus status : mmi.tServerInfo) {
          if (status != null) totalLoad += status.osLoad;
        }
        loadOverTime.add(new Pair<Long, Double>(currentTime, totalLoad));

        minorCompactionsOverTime.add(new Pair<Long, Integer>(currentTime, minorCompactions));
        majorCompactionsOverTime.add(new Pair<Long, Integer>(currentTime, majorCompactions));

        lookupsOverTime.add(new Pair<Long, Double>(currentTime, lookupRateTracker.calculateRate()));

        queryRateOverTime.add(new Pair<Long, Integer>(currentTime, (int) totalQueryRate));
        queryByteRateOverTime.add(new Pair<Long, Double>(currentTime, totalQueryByteRate));

        scanRateOverTime.add(new Pair<Long, Integer>(currentTime, (int) totalScanRate));

        calcCacheHitRate(
            indexCacheHitRateOverTime, currentTime, indexCacheHitTracker, indexCacheRequestTracker);
        calcCacheHitRate(
            dataCacheHitRateOverTime, currentTime, dataCacheHitTracker, dataCacheRequestTracker);
      }
      try {
        Monitor.problemSummary = ProblemReports.getInstance().summarize();
        Monitor.problemException = null;
      } catch (Exception e) {
        log.info("Failed to obtain problem reports ", e);
        Monitor.problemSummary = Collections.emptyMap();
        Monitor.problemException = e;
      }

    } finally {
      synchronized (Monitor.class) {
        fetching = false;
        lastRecalc = currentTime;
      }
    }
  }