Example #1
  /**
   * Generate a dummy namenode proxy instance that utilizes our hacked {@link
   * LossyRetryInvocationHandler}. A proxy instance generated by this method will proactively drop
   * RPC responses. Currently this method only supports an HA setup; null will be returned if the
   * given configuration is not for HA.
   *
   * @param config the configuration containing the required IPC properties, client failover
   *     configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param numResponseToDrop The number of responses to drop for each RPC call
   * @param fallbackToSimpleAuth set to true or false during calls to indicate if a secure client
   *     falls back to simple auth
   * @return an object containing both the proxy and the associated delegation token service it
   *     corresponds to. Will return null if the given configuration does not support HA.
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
      Configuration config,
      URI nameNodeUri,
      Class<T> xface,
      int numResponseToDrop,
      AtomicBoolean fallbackToSimpleAuth)
      throws IOException {
    Preconditions.checkArgument(numResponseToDrop > 0);
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, nameNodeUri, xface, true, fallbackToSimpleAuth);

    if (failoverProxyProvider != null) { // HA case
      int delay =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
      int maxCap =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
      int maxFailoverAttempts =
          config.getInt(
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
      int maxRetryAttempts =
          config.getInt(
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
      InvocationHandler dummyHandler =
          new LossyRetryInvocationHandler<T>(
              numResponseToDrop,
              failoverProxyProvider,
              RetryPolicies.failoverOnNetworkException(
                  RetryPolicies.TRY_ONCE_THEN_FAIL,
                  maxFailoverAttempts,
                  Math.max(numResponseToDrop + 1, maxRetryAttempts),
                  delay,
                  maxCap));

      T proxy =
          (T)
              Proxy.newProxyInstance(
                  failoverProxyProvider.getInterface().getClassLoader(),
                  new Class[] {xface},
                  dummyHandler);
      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService =
            HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
      } else {
        dtService = SecurityUtil.buildTokenService(NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService, NameNode.getAddress(nameNodeUri));
    } else {
      LOG.warn(
          "Currently creating proxy using " + "LossyRetryInvocationHandler requires NN HA setup");
      return null;
    }
  }
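As a hedged usage sketch (not part of the snippet above): a fault-injection test could ask this factory for a lossy ClientProtocol proxy against an HA nameservice and then issue RPCs through it to exercise the client retry path. The nameservice URI and the choice of ClientProtocol here are illustrative assumptions.

  // Illustrative only: conf must be an HA configuration; "mycluster" is a hypothetical
  // logical nameservice. Dropping 2 responses per call forces the retry machinery to run.
  URI nnUri = URI.create("hdfs://mycluster");
  ProxyAndInfo<ClientProtocol> lossy =
      createProxyWithLossyRetryHandler(conf, nnUri, ClientProtocol.class, 2, null);
  if (lossy != null) { // null means conf was not an HA configuration
    ClientProtocol namenode = lossy.getProxy();
    // RPCs issued through namenode now have their first two responses dropped.
  }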
Example #2

  private static FileSystemStore createDefaultStore(Configuration conf) {
    FileSystemStore store = new JetOssFileSystemStore();

    RetryPolicy basePolicy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt("fs.oss.maxRetries", 4),
            conf.getLong("fs.oss.sleepTimeSeconds", 10),
            TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(OssException.class, basePolicy);

    RetryPolicy methodPolicy =
        RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("storeBlock", methodPolicy);
    methodNameToPolicyMap.put("retrieveBlock", methodPolicy);

    return (FileSystemStore) RetryProxy.create(FileSystemStore.class, store, methodNameToPolicyMap);
  }
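The method above follows a reusable Hadoop retry pattern: map exception types to a base policy with retryByException, then attach the resulting policy only to the methods that are safe to retry. A minimal sketch of that pattern on a hypothetical KeyValueStore interface (the interface and all names here are assumptions, not part of the snippet):

  // Hypothetical single-purpose interface, for illustration only.
  interface KeyValueStore {
    byte[] get(String key) throws IOException;
    void put(String key, byte[] value) throws IOException;
  }

  static KeyValueStore withRetries(KeyValueStore rawStore) {
    RetryPolicy basePolicy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS);

    // Only IOExceptions are retried; anything else falls through to TRY_ONCE_THEN_FAIL.
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    RetryPolicy methodPolicy =
        RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);

    // Only get() is retried; put() keeps the proxy's default single-attempt behaviour.
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("get", methodPolicy);

    return (KeyValueStore)
        RetryProxy.create(KeyValueStore.class, rawStore, methodNameToPolicyMap);
  }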
Example #3
 // TODO: Replace this whenever hadoop gets their act together and stops breaking with more recent
 // versions of Guava
 public static long unzipNoGuava(
     final Path zip,
     final Configuration configuration,
     final File outDir,
     final Progressable progressable)
     throws IOException {
   final DataPusher zipPusher =
       (DataPusher)
           RetryProxy.create(
               DataPusher.class,
               new DataPusher() {
                 @Override
                 public long push() throws IOException {
                   try {
                     final FileSystem fileSystem = zip.getFileSystem(configuration);
                     long size = 0L;
                     final byte[] buffer = new byte[1 << 13];
                     progressable.progress();
                     try (ZipInputStream in = new ZipInputStream(fileSystem.open(zip, 1 << 13))) {
                       for (ZipEntry entry = in.getNextEntry();
                           entry != null;
                           entry = in.getNextEntry()) {
                         final String fileName = entry.getName();
                         try (final OutputStream out =
                             new BufferedOutputStream(
                                 new FileOutputStream(
                                     outDir.getAbsolutePath() + File.separator + fileName),
                                 1 << 13)) {
                           for (int len = in.read(buffer); len >= 0; len = in.read(buffer)) {
                             progressable.progress();
                             if (len == 0) {
                               continue;
                             }
                             size += len;
                             out.write(buffer, 0, len);
                           }
                           out.flush();
                         }
                       }
                     }
                     progressable.progress();
                     return size;
                   } catch (IOException | RuntimeException exception) {
                     log.error(exception, "Exception in unzip retry loop");
                     throw exception;
                   }
                 }
               },
               RetryPolicies.exponentialBackoffRetry(
                   NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
   return zipPusher.push();
 }
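Example #3 (and examples #6 and #7 below) uses a second idiom: put a one-shot unit of work behind a single-method interface, wrap it with RetryProxy under one blanket policy, and invoke it once. Stripped of the ZIP handling, the skeleton looks roughly like this; UnitOfWork and runWithRetries are hypothetical names, not part of the snippet:

  // Hypothetical single-method interface, mirroring the DataPusher used above.
  interface UnitOfWork {
    long run() throws IOException;
  }

  static long runWithRetries(UnitOfWork work, int maxRetries, long sleepSeconds)
      throws IOException {
    // One blanket policy covers every method of the interface; no method-name map needed.
    UnitOfWork retried =
        (UnitOfWork)
            RetryProxy.create(
                UnitOfWork.class,
                work,
                RetryPolicies.exponentialBackoffRetry(maxRetries, sleepSeconds, TimeUnit.SECONDS));
    // The work must be written so it is safe to re-run from scratch on each attempt.
    return retried.run();
  }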
Example #4
 private static NamenodeProtocol createNNProxyWithNamenodeProtocol(
     InetSocketAddress address, Configuration conf, UserGroupInformation ugi, boolean withRetries)
     throws IOException {
   NamenodeProtocolPB proxy =
       (NamenodeProtocolPB) createNameNodeProxy(address, conf, ugi, NamenodeProtocolPB.class);
   if (withRetries) { // create the proxy with retries
     RetryPolicy timeoutPolicy =
         RetryPolicies.exponentialBackoffRetry(5, 200, TimeUnit.MILLISECONDS);
     Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
     methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
     methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
     NamenodeProtocol translatorProxy = new NamenodeProtocolTranslatorPB(proxy);
     return (NamenodeProtocol)
         RetryProxy.create(NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
   } else {
     return new NamenodeProtocolTranslatorPB(proxy);
   }
 }
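One behaviour worth noting (my reading of Hadoop's RetryProxy, not stated in the snippet): methods absent from methodNameToPolicyMap are not retried at all, so only getBlocks and getAccessKeys get the backoff here. A hypothetical caller, with nnAddress and conf as placeholders:

  // Hypothetical caller; nnAddress and conf are placeholders, not taken from the snippet.
  NamenodeProtocol namenode =
      createNNProxyWithNamenodeProtocol(
          nnAddress, conf, UserGroupInformation.getCurrentUser(), true);

  // getBlocks()/getAccessKeys() calls on this proxy retry with exponential backoff;
  // methods outside the map, e.g. versionRequest(), fail on their first error.
  NamespaceInfo namespaceInfo = namenode.versionRequest();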
Example #5
  /**
   * Creates the namenode proxy with the passed protocol. This will handle creation of either HA- or
   * non-HA-enabled proxy objects, depending on whether the provided URI is a configured logical URI.
   *
   * @param conf the configuration containing the required IPC properties, client failover
   *     configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param fallbackToSimpleAuth set to true or false during calls to indicate if a secure client
   *     falls back to simple auth
   * @return an object containing both the proxy and the associated delegation token service it
   *     corresponds to
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxy(
      Configuration conf, URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
      throws IOException {
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(conf, nameNodeUri, xface, true, fallbackToSimpleAuth);

    if (failoverProxyProvider == null) {
      // Non-HA case
      return createNonHAProxy(
          conf,
          NameNode.getAddress(nameNodeUri),
          xface,
          UserGroupInformation.getCurrentUser(),
          true,
          fallbackToSimpleAuth);
    } else {
      // HA case
      DfsClientConf config = new DfsClientConf(conf);
      T proxy =
          (T)
              RetryProxy.create(
                  xface,
                  failoverProxyProvider,
                  RetryPolicies.failoverOnNetworkException(
                      RetryPolicies.TRY_ONCE_THEN_FAIL,
                      config.getMaxFailoverAttempts(),
                      config.getMaxRetryAttempts(),
                      config.getFailoverSleepBaseMillis(),
                      config.getFailoverSleepMaxMillis()));

      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService =
            HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
      } else {
        dtService = SecurityUtil.buildTokenService(NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService, NameNode.getAddress(nameNodeUri));
    }
  }
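A hedged usage sketch: this is the factory a DFS client would typically go through, letting it pick the HA or non-HA path based on the configuration. The use of ClientProtocol and fs.defaultFS here is an illustrative assumption:

  // Illustrative only: resolve whatever fs.defaultFS points at (logical or physical URI).
  URI nnUri = FileSystem.getDefaultUri(conf);
  ProxyAndInfo<ClientProtocol> proxyInfo =
      createProxy(conf, nnUri, ClientProtocol.class, null);
  ClientProtocol namenode = proxyInfo.getProxy();
  Text tokenService = proxyInfo.getDelegationTokenService(); // used when fetching delegation tokens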
Example #6
 public static void writeSegmentDescriptor(
     final FileSystem outputFS,
     final DataSegment segment,
     final Path descriptorPath,
     final Progressable progressable)
     throws IOException {
   final DataPusher descriptorPusher =
       (DataPusher)
           RetryProxy.create(
               DataPusher.class,
               new DataPusher() {
                 @Override
                 public long push() throws IOException {
                   try {
                     progressable.progress();
                     if (outputFS.exists(descriptorPath)) {
                       if (!outputFS.delete(descriptorPath, false)) {
                         throw new IOException(
                             String.format("Failed to delete descriptor at [%s]", descriptorPath));
                       }
                     }
                     try (final OutputStream descriptorOut =
                         outputFS.create(
                             descriptorPath, true, DEFAULT_FS_BUFFER_SIZE, progressable)) {
                       HadoopDruidIndexerConfig.jsonMapper.writeValue(descriptorOut, segment);
                       descriptorOut.flush();
                     }
                   } catch (RuntimeException | IOException ex) {
                     log.info(ex, "Exception in descriptor pusher retry loop");
                     throw ex;
                   }
                   return -1;
                 }
               },
               RetryPolicies.exponentialBackoffRetry(
                   NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
   descriptorPusher.push();
 }
Example #7
  public static DataSegment serializeOutIndex(
      final DataSegment segmentTemplate,
      final Configuration configuration,
      final Progressable progressable,
      final TaskAttemptID taskAttemptID,
      final File mergedBase,
      final Path segmentBasePath)
      throws IOException {
    final FileSystem outputFS = FileSystem.get(segmentBasePath.toUri(), configuration);
    final Path tmpPath =
        new Path(segmentBasePath, String.format("index.zip.%d", taskAttemptID.getId()));
    final AtomicLong size = new AtomicLong(0L);
    final DataPusher zipPusher =
        (DataPusher)
            RetryProxy.create(
                DataPusher.class,
                new DataPusher() {
                  @Override
                  public long push() throws IOException {
                    try (OutputStream outputStream =
                        outputFS.create(tmpPath, true, DEFAULT_FS_BUFFER_SIZE, progressable)) {
                      size.set(zipAndCopyDir(mergedBase, outputStream, progressable));
                      outputStream.flush();
                    } catch (IOException | RuntimeException exception) {
                      log.error(exception, "Exception in retry loop");
                      throw exception;
                    }
                    return -1;
                  }
                },
                RetryPolicies.exponentialBackoffRetry(
                    NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    zipPusher.push();
    log.info("Zipped %,d bytes to [%s]", size.get(), tmpPath.toUri());

    final Path finalIndexZipFilePath = new Path(segmentBasePath, "index.zip");
    final URI indexOutURI = finalIndexZipFilePath.toUri();
    final ImmutableMap<String, Object> loadSpec;
    // TODO: Make this a part of Pushers or Pullers
    switch (outputFS.getScheme()) {
      case "hdfs":
        loadSpec = ImmutableMap.<String, Object>of("type", "hdfs", "path", indexOutURI.toString());
        break;
      case "s3":
      case "s3n":
        loadSpec =
            ImmutableMap.<String, Object>of(
                "type", "s3_zip",
                "bucket", indexOutURI.getHost(),
                "key", indexOutURI.getPath().substring(1) // remove the leading "/"
                );
        break;
      case "file":
        loadSpec = ImmutableMap.<String, Object>of("type", "local", "path", indexOutURI.getPath());
        break;
      default:
        throw new IAE("Unknown file system scheme [%s]", outputFS.getScheme());
    }
    final DataSegment finalSegment =
        segmentTemplate
            .withLoadSpec(loadSpec)
            .withSize(size.get())
            .withBinaryVersion(SegmentUtils.getVersionFromDir(mergedBase));

    if (!renameIndexFiles(outputFS, tmpPath, finalIndexZipFilePath)) {
      throw new IOException(
          String.format(
              "Unable to rename [%s] to [%s]",
              tmpPath.toUri().toString(), finalIndexZipFilePath.toUri().toString()));
    }
    writeSegmentDescriptor(
        outputFS, finalSegment, new Path(segmentBasePath, "descriptor.json"), progressable);
    return finalSegment;
  }
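Finally, a hedged sketch of a call site for serializeOutIndex, e.g. from the reducer that has just merged a segment locally; the context, template, and path variables are placeholders rather than code from the snippet:

  // Illustrative only: push the merged directory and keep the returned DataSegment,
  // whose loadSpec now matches the scheme of the output filesystem.
  final DataSegment pushedSegment =
      serializeOutIndex(
          segmentTemplate,
          context.getConfiguration(),
          context, // a TaskAttemptContext also serves as the Progressable
          context.getTaskAttemptID(),
          mergedSegmentDir,
          segmentBasePath);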