  public int getPort(Property property) {
    checkType(property, PropertyType.PORT);

    String portString = get(property);
    int port = Integer.parseInt(portString);
    if (port != 0) {
      // a value of 0 is accepted as-is; any other value must fall in the non-privileged range
      if (port < 1024 || port > 65535) {
        log.error("Invalid port number " + port + "; using default " + property.getDefaultValue());
        port = Integer.parseInt(property.getDefaultValue());
      }
    }
    return port;
  }
  private void checkType(Property property, PropertyType type) {
    if (!property.getType().equals(type)) {
      String msg =
          "Configuration method intended for type "
              + type
              + " called with a "
              + property.getType()
              + " argument ("
              + property.getKey()
              + ")";
      IllegalArgumentException err = new IllegalArgumentException(msg);
      log.error(msg, err);
      throw err;
    }
  }
  /**
   * Returns all properties under the given prefix property as a string-to-string map.
   *
   * @param property the prefix property; must be of type {@code PropertyType.PREFIX}
   * @return a map from property keys to values for the matching properties
   */
  public Map<String, String> getAllPropertiesWithPrefix(Property property) {
    checkType(property, PropertyType.PREFIX);

    Map<String, String> propMap = new HashMap<String, String>();
    getProperties(propMap, new PrefixFilter(property.getKey()));
    return propMap;
  }
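As a usage sketch (not part of the class above), a caller holding an already-populated AccumuloConfiguration, assumed here to be named conf, could list every per-table constraint setting like this:

// Illustrative only: dump every property under the table constraint prefix.
Map<String, String> constraints =
    conf.getAllPropertiesWithPrefix(Property.TABLE_CONSTRAINT_PREFIX);
for (Entry<String, String> e : constraints.entrySet()) {
  System.out.println(e.getKey() + " = " + e.getValue());
}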
Example #4
  public static boolean isValidTablePropertyKey(String key) {
    // lazily build (with double-checked locking) the set of fixed, non-prefix table property keys
    if (validTableProperties == null) {
      synchronized (Property.class) {
        if (validTableProperties == null) {
          HashSet<String> tmp = new HashSet<String>();
          for (Property p : Property.values())
            if (!p.getType().equals(PropertyType.PREFIX)
                && p.getKey().startsWith(Property.TABLE_PREFIX.getKey())) tmp.add(p.getKey());
          validTableProperties = tmp;
        }
      }
    }

    return validTableProperties.contains(key)
        || key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())
        || key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey())
        || key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey());
  }
    // BEGIN methods that Tablets call to make decisions about major compaction
    // when too many files are open, we may want tablets to compact down
    // to one map file
    public boolean needsMajorCompaction(
        SortedMap<FileRef, DataFileValue> tabletFiles, MajorCompactionReason reason) {
      if (closed) return false;

      if (reason == MajorCompactionReason.USER) return true;

      if (reason == MajorCompactionReason.IDLE) {
        long idleTime;
        if (lastReportedCommitTime == 0) {
          // no commits, so compute how long the tablet has been assigned to the
          // tablet server
          idleTime = System.currentTimeMillis() - creationTime;
        } else {
          idleTime = System.currentTimeMillis() - lastReportedCommitTime;
        }

        if (idleTime < tableConf.getTimeInMillis(Property.TABLE_MAJC_COMPACTALL_IDLETIME)) {
          return false;
        }
      }
      CompactionStrategy strategy =
          Property.createTableInstanceFromPropertyName(
              tableConf,
              Property.TABLE_COMPACTION_STRATEGY,
              CompactionStrategy.class,
              new DefaultCompactionStrategy());
      strategy.init(Property.getCompactionStrategyOptions(tableConf));
      MajorCompactionRequest request =
          new MajorCompactionRequest(
              extent, reason, TabletServerResourceManager.this.fs, tableConf);
      request.setFiles(tabletFiles);
      try {
        return strategy.shouldCompact(request);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
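The IDLE branch above reduces to a single time comparison. As an illustrative restatement (a hypothetical helper, not Accumulo API), the decision can be isolated like this:

// Hypothetical helper mirroring the IDLE check above: a tablet qualifies for an idle
// compaction only after it has gone at least idleThresholdMillis without a commit, or,
// if it has never committed, since it was created/assigned.
static boolean idleLongEnough(
    long nowMillis, long creationTime, long lastReportedCommitTime, long idleThresholdMillis) {
  long idleTime =
      lastReportedCommitTime == 0 ? nowMillis - creationTime : nowMillis - lastReportedCommitTime;
  return idleTime >= idleThresholdMillis;
}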
Example #6
  public static void parseIterConf(
      IteratorScope scope,
      List<IterInfo> iters,
      Map<String, Map<String, String>> allOptions,
      AccumuloConfiguration conf) {
    final Property scopeProperty = getProperty(scope);
    final String scopePropertyKey = scopeProperty.getKey();

    for (Entry<String, String> entry : conf.getAllPropertiesWithPrefix(scopeProperty).entrySet()) {
      String suffix = entry.getKey().substring(scopePropertyKey.length());
      String[] suffixSplit = suffix.split("\\.", 3);

      if (suffixSplit.length == 1) {
        // a bare iterator name maps to a "<priority>,<class name>" value
        String[] sa = entry.getValue().split(",");
        int prio = Integer.parseInt(sa[0]);
        String className = sa[1];
        iters.add(new IterInfo(prio, className, suffixSplit[0]));
      } else if (suffixSplit.length == 3 && suffixSplit[1].equals("opt")) {
        String iterName = suffixSplit[0];
        String optName = suffixSplit[2];

        Map<String, String> options = allOptions.get(iterName);
        if (options == null) {
          options = new HashMap<>();
          allOptions.put(iterName, options);
        }

        options.put(optName, entry.getValue());

      } else {
        log.warn("Unrecognizable option: " + entry.getKey());
      }
    }

    Collections.sort(iters, new IterInfoComparator());
  }
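Concretely, the loop above recognizes two key shapes under the scope prefix. The names and values below are purely illustrative, and the scan-scope prefix is assumed to resolve to table.iterator.scan.:

// "<prefix><iterName>"              -> "<priority>,<class name>"   (registers the iterator)
// "<prefix><iterName>.opt.<option>" -> "<option value>"            (adds an option for it)
//
// For example (illustrative names and values):
//   table.iterator.scan.myFilter          = 30,com.example.MyFilter
//   table.iterator.scan.myFilter.opt.type = whitelist
//
// would add new IterInfo(30, "com.example.MyFilter", "myFilter") to iters and put
// "type" -> "whitelist" into allOptions.get("myFilter").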
Example #7
  protected Scanner getScanner(StringBuilder sb)
      throws AccumuloException, AccumuloSecurityException {
    AccumuloConfiguration conf = Monitor.getSystemConfiguration();
    String principal = conf.get(Property.TRACE_USER);
    AuthenticationToken at;
    Map<String, String> loginMap =
        conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX);
    if (loginMap.isEmpty()) {
      Property p = Property.TRACE_PASSWORD;
      at = new PasswordToken(conf.get(p).getBytes(StandardCharsets.UTF_8));
    } else {
      Properties props = new Properties();
      int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length();
      for (Entry<String, String> entry : loginMap.entrySet()) {
        props.put(entry.getKey().substring(prefixLength), entry.getValue());
      }

      AuthenticationToken token =
          Property.createInstanceFromPropertyName(
              conf, Property.TRACE_TOKEN_TYPE, AuthenticationToken.class, new PasswordToken());
      token.init(props);
      at = token;
    }

    String table = conf.get(Property.TRACE_TABLE);
    try {
      Connector conn = HdfsZooInstance.getInstance().getConnector(principal, at);
      if (!conn.tableOperations().exists(table)) {
        return new NullScanner();
      }
      Scanner scanner =
          conn.createScanner(table, conn.securityOperations().getUserAuthorizations(principal));
      return scanner;
    } catch (AccumuloSecurityException ex) {
      sb.append(
          "<h2>Unable to read trace table: check trace username and password configuration.</h2>\n");
      return null;
    } catch (TableNotFoundException ex) {
      return new NullScanner();
    }
  }
Example #8
  /**
   * Validates the given configuration entries. A valid configuration contains only recognized
   * property keys (either defined properties or keys under a known prefix), none of which are bare
   * prefix properties, and whose values are formatted correctly for their property types. A valid
   * configuration also contains a value for {@link Property#INSTANCE_ZK_TIMEOUT} within a valid
   * range.
   *
   * @param entries iterable over configuration keys and values
   * @throws SanityCheckException if a fatal configuration error is found
   */
  public static void validate(Iterable<Entry<String, String>> entries) {
    String instanceZkTimeoutValue = null;
    boolean usingVolumes = false;
    for (Entry<String, String> entry : entries) {
      String key = entry.getKey();
      String value = entry.getValue();
      Property prop = Property.getPropertyByKey(key);
      if (prop == null && Property.isValidPropertyKey(key))
        continue; // unknown valid property (i.e. has proper prefix)
      else if (prop == null) log.warn(PREFIX + "unrecognized property key (" + key + ")");
      else if (prop.getType() == PropertyType.PREFIX)
        fatal(PREFIX + "incomplete property key (" + key + ")");
      else if (!prop.getType().isValidFormat(value))
        fatal(
            PREFIX
                + "improperly formatted value for key ("
                + key
                + ", type="
                + prop.getType()
                + ")");

      if (key.equals(Property.INSTANCE_ZK_TIMEOUT.getKey())) {
        instanceZkTimeoutValue = value;
      }

      if (key.equals(Property.INSTANCE_VOLUMES.getKey())) {
        usingVolumes = value != null && !value.isEmpty();
      }
    }

    if (instanceZkTimeoutValue != null) {
      checkTimeDuration(
          Property.INSTANCE_ZK_TIMEOUT,
          instanceZkTimeoutValue,
          new CheckTimeDurationBetween(1000, 300000));
    }

    if (!usingVolumes) {
      log.warn(
          "Use of "
              + INSTANCE_DFS_URI
              + " and "
              + INSTANCE_DFS_DIR
              + " are deprecated. Consider using "
              + Property.INSTANCE_VOLUMES
              + " instead.");
    }
  }
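A hedged usage sketch: because validate accepts any Iterable of key/value entries, a configuration staged in a plain Map can be checked before it is applied (called here as if from within the same class; keys and values are illustrative):

// Illustrative only: sanity-check a couple of staged site settings.
Map<String, String> siteConfig = new HashMap<>();
siteConfig.put("instance.zookeeper.timeout", "30s"); // within the accepted 1s-300s window
siteConfig.put("instance.volumes", "hdfs://nameservice1/accumulo");
validate(siteConfig.entrySet()); // logs warnings, throws SanityCheckException on fatal errors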
Example #9
 @Override
 public String get(Property property) {
   return map.get(property.getKey());
 }
Example #10
 private static void verifyPropertyTypes(PropertyType type, Property... properties) {
   for (Property prop : properties)
     if (prop.getType() != type)
       fatal("Unexpected property type (" + prop.getType() + " != " + type + ")");
 }
Example #11
 public static Property getPropertyByKey(String key) {
   for (Property prop : Property.values()) if (prop.getKey().equals(key)) return prop;
   return null;
 }
  public TabletServerResourceManager(TabletServer tserver, VolumeManager fs) {
    this.tserver = tserver;
    this.conf = tserver.getServerConfigurationFactory();
    this.fs = fs;
    final AccumuloConfiguration acuConf = conf.getConfiguration();

    long maxMemory = acuConf.getMemoryInBytes(Property.TSERV_MAXMEM);
    boolean usingNativeMap =
        acuConf.getBoolean(Property.TSERV_NATIVEMAP_ENABLED) && NativeMap.isLoaded();

    long blockSize = acuConf.getMemoryInBytes(Property.TSERV_DEFAULT_BLOCKSIZE);
    long dCacheSize = acuConf.getMemoryInBytes(Property.TSERV_DATACACHE_SIZE);
    long iCacheSize = acuConf.getMemoryInBytes(Property.TSERV_INDEXCACHE_SIZE);
    long totalQueueSize = acuConf.getMemoryInBytes(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX);

    _iCache = new LruBlockCache(iCacheSize, blockSize);
    _dCache = new LruBlockCache(dCacheSize, blockSize);

    Runtime runtime = Runtime.getRuntime();
    if (usingNativeMap) {
      // Still check block cache sizes when using native maps.
      if (dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
        throw new IllegalArgumentException(
            String.format(
                "Block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
                dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
      }
    } else if (maxMemory + dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
      throw new IllegalArgumentException(
          String.format(
              "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
              maxMemory, dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
    }
    runtime.gc();

    // totalMemory - freeMemory = memory in use
    // maxMemory - memory in use = max available memory
    if (!usingNativeMap
        && maxMemory > runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory())) {
      log.warn("In-memory map may not fit into local memory space.");
    }
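    // Worked example (illustrative numbers): with maxMemory() = 1024 MB, totalMemory() = 512 MB
    // and freeMemory() = 128 MB, memory in use is 512 - 128 = 384 MB, leaving 1024 - 384 = 640 MB;
    // a TSERV_MAXMEM above that only triggers the warning above, it does not fail fast.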

    minorCompactionThreadPool = createEs(Property.TSERV_MINC_MAXCONCURRENT, "minor compactor");

    // back this thread pool with a priority queue so that tablets with the most files are
    // compacted first
    majorCompactionThreadPool =
        createEs(
            Property.TSERV_MAJC_MAXCONCURRENT,
            "major compactor",
            new CompactionQueue().asBlockingQueueOfRunnable());
    rootMajorCompactionThreadPool = createEs(0, 1, 300, "md root major compactor");
    defaultMajorCompactionThreadPool = createEs(0, 1, 300, "md major compactor");

    splitThreadPool = createEs(1, "splitter");
    defaultSplitThreadPool = createEs(0, 1, 60, "md splitter");

    defaultMigrationPool = createEs(0, 1, 60, "metadata tablet migration");
    migrationPool = createEs(Property.TSERV_MIGRATE_MAXCONCURRENT, "tablet migration");

    // Not sure if concurrent assignments can run safely. Even if they could, there is probably no
    // benefit at startup: the tablet servers as a group already handle assignments concurrently,
    // so having each individual tablet server also run assignments concurrently would mostly add
    // load on the metadata table at startup.
    assignmentPool = createEs(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "tablet assignment");

    assignMetaDataPool = createEs(0, 1, 60, "metadata tablet assignment");

    activeAssignments = new ConcurrentHashMap<KeyExtent, RunnableStartedAt>();

    readAheadThreadPool = createEs(Property.TSERV_READ_AHEAD_MAXCONCURRENT, "tablet read ahead");
    defaultReadAheadThreadPool =
        createEs(Property.TSERV_METADATA_READ_AHEAD_MAXCONCURRENT, "metadata tablets read ahead");

    int maxOpenFiles = acuConf.getCount(Property.TSERV_SCAN_MAX_OPENFILES);

    fileManager = new FileManager(tserver, fs, maxOpenFiles, _dCache, _iCache);

    memoryManager =
        Property.createInstanceFromPropertyName(
            acuConf, Property.TSERV_MEM_MGMT, MemoryManager.class, new LargestFirstMemoryManager());
    memoryManager.init(tserver.getServerConfigurationFactory());
    memMgmt = new MemoryManagementFramework();
    memMgmt.startThreads();

    SimpleTimer timer = SimpleTimer.getInstance(tserver.getConfiguration());

    // We can use the same map for both metadata and normal assignments since the keyspace (extent)
    // is guaranteed to be unique. Schedule the task once, the task will reschedule itself.
    timer.schedule(new AssignmentWatcher(acuConf, activeAssignments, timer), 5000);
  }