public int run(String[] args) throws Exception {
    Configuration argConf = getConf();

    // JobConf conf = new JobConf(diffdb.class);
    Configuration config = HBaseConfiguration.create();
    HBaseAdmin hbAdmin = new HBaseAdmin(config);
    dbutil db_util = new dbutil(config);

    HTable runTable = new HTable(config, "gestore_runs");
    Get runGet = new Get(argConf.get("id").getBytes());
    Result pipeline = runTable.get(runGet);

    NavigableMap<byte[], byte[]> pipeMap = pipeline.getFamilyMap("d".getBytes());

    Map.Entry<byte[], byte[]> results = pipeMap.pollFirstEntry();

    HashMap<String, HashMap<String, String>> resultMap =
        new HashMap<String, HashMap<String, String>>();

    while (results != null) {
      String resultKey = new String(results.getKey());
      String resultValue = new String(results.getValue());
      String field = "type";
      HashMap<String, String> tempMap = new HashMap<String, String>();
      String entry = resultKey;

      if (resultKey.endsWith("_db_timestamp")) {
        field = "db_timestamp";
        entry = resultKey.substring(0, resultKey.lastIndexOf("_db_timestamp"));
      } else if (resultKey.endsWith("_filename")) {
        field = "filename";
        entry = resultKey.substring(0, resultKey.lastIndexOf("_filename"));
      } else if (resultKey.endsWith("_regex")) {
        field = "regex";
        entry = resultKey.substring(0, resultKey.lastIndexOf("_regex"));
      }

      if (resultMap.containsKey(entry)) {
        tempMap = resultMap.get(entry);
      }

      tempMap.put(field, resultValue);
      resultMap.put(entry, tempMap);

      // System.out.println("Key: " + resultKey + " Value: " + resultValue);
      results = pipeMap.pollFirstEntry();
    }

    for (String key : resultMap.keySet()) {
      System.out.println("File ID: " + key);
      for (String subKey : resultMap.get(key).keySet()) {
        // System.out.println("\t " + subKey + "\t\t" + resultMap.get(key).get(subKey));
        System.out.format("  %1$-20s  %2$s\n", subKey, resultMap.get(key).get(subKey));
      }
    }

    runTable.close();
    return 0;
  }
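The loop above collapses the columns of the "d" family into a two-level map keyed by file ID, treating the _db_timestamp, _filename, and _regex qualifier suffixes as field names. The standalone sketch below reproduces just that grouping step on made-up in-memory data (the qualifiers and values are illustrative, not taken from the original source):

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class QualifierGroupingDemo {
  public static void main(String[] args) {
    // Hypothetical column qualifiers as they might appear in the "d" family.
    Map<String, String> columns = new TreeMap<String, String>();
    columns.put("fileA_db_timestamp", "1398765432");
    columns.put("fileA_filename", "/data/fileA.txt");
    columns.put("fileA_regex", ".*");

    Map<String, Map<String, String>> grouped = new HashMap<String, Map<String, String>>();
    for (Map.Entry<String, String> e : columns.entrySet()) {
      String field = "type";
      String entry = e.getKey();
      for (String suffix : new String[] {"_db_timestamp", "_filename", "_regex"}) {
        if (entry.endsWith(suffix)) {
          field = suffix.substring(1);
          entry = entry.substring(0, entry.lastIndexOf(suffix));
          break;
        }
      }
      grouped.computeIfAbsent(entry, k -> new HashMap<String, String>()).put(field, e.getValue());
    }
    // Prints something like: {fileA={regex=.*, filename=/data/fileA.txt, db_timestamp=1398765432}}
    System.out.println(grouped);
  }
}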
  public HBaseOperations(
      String tableName, List<String> columnFamilies, List<String> zookeeperIPs, int zkPort) {
    conf = HBaseConfiguration.create();

    // Build the comma-separated ZooKeeper quorum string from the given hosts.
    String zookeeperQuorum = String.join(",", zookeeperIPs);
    System.out.println(zookeeperQuorum);
    conf.set("hbase.zookeeper.quorum", zookeeperQuorum);
    conf.setInt("hbase.zookeeper.property.clientPort", zkPort);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    createTable(tableName, columnFamilies);

    try {
      hTable = new HTable(conf, tableName);
    } catch (IOException e) {
      System.out.println("Error occurred while creating instance of HTable class:" + e);
    }
  }
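A brief usage sketch for the constructor above. The table name, column families, ZooKeeper hosts, and port are illustrative; it only assumes the class behaves as shown, i.e. it creates the table and opens an HTable handle against the configured quorum.

import java.util.Arrays;
import java.util.List;

public class HBaseOperationsDemo {
  public static void main(String[] args) {
    // Illustrative column families and ZooKeeper quorum hosts.
    List<String> families = Arrays.asList("d", "meta");
    List<String> zookeepers = Arrays.asList("zk1.example.com", "zk2.example.com", "zk3.example.com");
    // Creates "demo_table" (via createTable) and opens an HTable handle to it on port 2181.
    HBaseOperations ops = new HBaseOperations("demo_table", families, zookeepers, 2181);
  }
}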
Example #3
/** Place to put common HBase things. */
public class HBaseUtils {

  private static final int KERBEROS_EXPIRATION_HOURS = 11;
  private final Configuration conf = HBaseConfiguration.create();
  private final int regions;
  private boolean valid;
  private Connection connection;
  private ScheduledExecutorService kerberosRefresher =
      Executors.newScheduledThreadPool(1, new NamedThreadFactory("kerberos.executor", false));

  public HBaseUtils(
      String quorum,
      boolean useKerberos,
      String keyTabUsername,
      String kerberosEnv,
      String keyTabFileLocation,
      int regions)
      throws IOException {
    this.regions = regions;
    conf.set("hbase.zookeeper.quorum", quorum);
    if (useKerberos) {
      conf.set("hadoop.security.authentication", "Kerberos");
      conf.set("hbase.security.authentication", "Kerberos");
      conf.set("hbase.master.kerberos.principal", "hbase/_HOST@" + kerberosEnv + ".YOURDOMAIN.COM");
      conf.set(
          "hbase.regionserver.kerberos.principal",
          "hbase/_HOST@" + kerberosEnv + ".YOURDOMAIN.COM");
      conf.set("hbase.client.keyvalue.maxsize", "-1");
      UserGroupInformation.setConfiguration(conf);
      try {
        UserGroupInformation.loginUserFromKeytab(
            keyTabUsername + "@" + kerberosEnv + ".YOURDOMAIN.COM", keyTabFileLocation);
        valid = true;
      } catch (IOException e) {
        e.printStackTrace();
        valid = false;
      }
      kerberosRefresher.scheduleAtFixedRate(
          () -> {
            try {
              UserGroupInformation ugi = UserGroupInformation.getLoginUser();
              if (ugi == null) {
                Logger.error("KERBEROS GOT LOGGED OUT");
                UserGroupInformation.loginUserFromKeytab(
                    keyTabUsername + "@" + kerberosEnv + ".YOURDOMAIN.COM", keyTabFileLocation);
              } else {
                ugi.checkTGTAndReloginFromKeytab();
              }
            } catch (IOException e) {
              e.printStackTrace();
            }
          },
          KERBEROS_EXPIRATION_HOURS,
          KERBEROS_EXPIRATION_HOURS,
          TimeUnit.HOURS);
    } else {
      valid = true;
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase-unsecure");
    }
    connection = ConnectionFactory.createConnection(conf);
  }

  public Optional<byte[]> get(
      Optional<String> table,
      Optional<String> family,
      Optional<String> qualifier,
      Optional<String> key) {
    if (!valid) {
      Logger.error("CANNOT GET! NO VALID CONNECTION");
      return Optional.empty();
    }
    if (table.isPresent()
        && family.isPresent()
        && qualifier.isPresent()
        && key.isPresent()
        && !key.get().isEmpty()) {
      try (Table htable = connection.getTable(TableName.valueOf(table.get()))) {
        Result result = htable.get(new Get(Bytes.toBytes(key.get())));
        return Optional.ofNullable(
            result.getValue(Bytes.toBytes(family.get()), Bytes.toBytes(qualifier.get())));
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    return Optional.empty();
  }

  public Optional<Response> get(Optional<Request> request) {
    if (!valid) {
      Logger.error("CANNOT GET! NO VALID CONNECTION");
      return Optional.empty();
    }
    Response response = new Response();
    if (request.isPresent()) {
      Request r = request.get();
      response.key = r.key;
      response.table = r.table;
      try (Table htable = connection.getTable(TableName.valueOf(r.table))) {
        Result result = htable.get(new Get(r.key));
        if (result == null || result.isEmpty()) {
          return Optional.empty();
        }
        r.columns.forEach(
            c ->
                response.columns.add(
                    new Request.Column(
                        c.family,
                        c.qualifier,
                        result.getValue(c.family.getBytes(), c.qualifier.getBytes()))));
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    return Optional.of(response);
  }

  public Optional<List<Response>> getBatch(Optional<List<Request>> requests) {
    if (!valid) {
      Logger.error("CANNOT GET! NO VALID CONNECTION");
      return Optional.empty();
    }
    List<Response> responses = new ArrayList<>();
    requests.ifPresent(
        reqs ->
            reqs.forEach(r -> get(Optional.of(r)).ifPresent(response -> responses.add(response))));
    return Optional.of(responses);
  }

  public void putBatch(Optional<List<Request>> putRequests, boolean optimize) {
    if (!valid) {
      Logger.error("CANNOT PUT! NO VALID CONNECTION");
      return;
    }
    List<Put> puts = new ArrayList<>();
    if (putRequests.isPresent() && !putRequests.get().isEmpty()) {
      String tableName = putRequests.get().get(0).table;
      putRequests
          .get()
          .forEach(
              pr ->
                  pr.getPut()
                      .ifPresent(
                          p -> {
                            if (optimize) {
                              p.setDurability(Durability.SKIP_WAL);
                            }
                            puts.add(p);
                          }));
      try {
        final Table table = connection.getTable(TableName.valueOf(tableName));
        if (optimize && table instanceof HTable) {
          ((HTable) table).setAutoFlush(false, true);
        }
        table.put(puts);
        table.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }

  public void put(Optional<Request> putRequest) {
    if (!valid) {
      Logger.error("CANNOT PUT! NO VALID CONNECTION");
      return;
    }
    putRequest.ifPresent(
        pr ->
            pr.getPut()
                .ifPresent(
                    p -> {
                      try {
                        final Table table = connection.getTable(TableName.valueOf(pr.table));
                        table.put(p);
                        table.close();
                      } catch (IOException e) {
                        e.printStackTrace();
                      }
                    }));
  }

  public Result get(String table, String family, byte[] key) {
    try (Table htable = connection.getTable(TableName.valueOf(table))) {
      return htable.get(new Get(key));
    } catch (IOException e) {
      e.printStackTrace();
    }
    return null;
  }

  public void put(String tablename, Put p) {
    try {
      final Table table = connection.getTable(TableName.valueOf(tablename));
      table.put(p);
      table.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }

  public Map<String, Long> getRegionSizes(String tableName) {
    Map<String, Long> regions = new HashMap<>();
    try (RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(tableName));
        Admin admin = connection.getAdmin()) {
      List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
      Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      for (HRegionLocation regionInfo : tableRegionInfos) {
        tableRegions.add(regionInfo.getRegionInfo().getRegionName());
      }
      ClusterStatus clusterStatus = admin.getClusterStatus();
      Collection<ServerName> servers = clusterStatus.getServers();
      final long megaByte = 1024L * 1024L;
      for (ServerName serverName : servers) {
        ServerLoad serverLoad = clusterStatus.getLoad(serverName);
        for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
          byte[] regionId = regionLoad.getName();
          if (tableRegions.contains(regionId)) {
            long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
            regions.put(regionLoad.getNameAsString(), regionSizeBytes);
          }
        }
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
    return regions;
  }

  public void createTable(String tableName, String columnFamily) {
    List<String> families = new ArrayList<>();
    families.add(columnFamily);
    createTable(tableName, families);
  }

  public void createTable(String tableName, List<String> columnFamilies) {
    try {
      Admin admin = connection.getAdmin();
      HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
      for (String family : columnFamilies) {
        descriptor.addFamily(new HColumnDescriptor(family));
      }
      admin.createTable(descriptor);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }

  public boolean tableExists(String tableName) {
    try {
      Admin admin = connection.getAdmin();
      return admin.tableExists(TableName.valueOf(tableName));
    } catch (IOException e) {
      e.printStackTrace();
    }
    return false;
  }

  public boolean removeTable(String tableName) {
    try {
      Admin admin = connection.getAdmin();
      TableName t = TableName.valueOf(tableName);
      if (admin.tableExists(t)) {
        admin.disableTable(t);
        admin.deleteTable(t);
        return true;
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
    return false;
  }

  public void scan(
      Consumer<Result> callback, String tableName, String columnFamily, String... qualifiers) {
    if (callback != null && tableName != null && columnFamily != null && qualifiers != null) {
      Scan s = new Scan();
      byte[] family = Bytes.toBytes(columnFamily);
      for (String qualifier : qualifiers) {
        s.addColumn(family, Bytes.toBytes(qualifier));
      }
      try (Table table = connection.getTable(TableName.valueOf(tableName));
          ResultScanner scanner = table.getScanner(s)) {
        for (Result r : scanner) {
          callback.accept(r);
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }

  public void printStats() throws IOException {
    Admin admin = connection.getAdmin();

    // Get the cluster status.
    ClusterStatus status = admin.getClusterStatus();
    System.out.println("Cluster Status:\n--------------");
    System.out.println("HBase Version: " + status.getHBaseVersion());
    System.out.println("Version: " + status.getVersion());
    System.out.println("Cluster ID: " + status.getClusterId());
    System.out.println("Master: " + status.getMaster());
    System.out.println("No. Backup Masters: " + status.getBackupMastersSize());
    System.out.println("Backup Masters: " + status.getBackupMasters());
    System.out.println("No. Live Servers: " + status.getServersSize());
    System.out.println("Servers: " + status.getServers());
    System.out.println("No. Dead Servers: " + status.getDeadServers());
    System.out.println("Dead Servers: " + status.getDeadServerNames());
    System.out.println("No. Regions: " + status.getRegionsCount());
    System.out.println("Regions in Transition: " + status.getRegionsInTransition());
    System.out.println("No. Requests: " + status.getRequestsCount());
    System.out.println("Avg Load: " + status.getAverageLoad());
    System.out.println("Balancer On: " + status.getBalancerOn());
    System.out.println("Is Balancer On: " + status.isBalancerOn());
    System.out.println("Master Coprocessors: " + Arrays.asList(status.getMasterCoprocessors()));
    System.out.println("\nServer Info:\n--------------");
    // Iterate over the included server instances.
    for (ServerName server : status.getServers()) {
      System.out.println("Hostname: " + server.getHostname());
      System.out.println("Host and Port: " + server.getHostAndPort());
      System.out.println("Server Name: " + server.getServerName());
      System.out.println("RPC Port: " + server.getPort());
      System.out.println("Start Code: " + server.getStartcode());
      // Retrieve the load details for the current server.
      ServerLoad load = status.getLoad(server);
      System.out.println("\nServer Load:\n--------------");
      System.out.println("Info Port: " + load.getInfoServerPort());
      System.out.println("Load: " + load.getLoad());
      System.out.println("Max Heap (MB): " + load.getMaxHeapMB());
      System.out.println("Used Heap (MB): " + load.getUsedHeapMB());
      System.out.println("Memstore Size (MB): " + load.getMemstoreSizeInMB());
      System.out.println("No. Regions: " + load.getNumberOfRegions());
      System.out.println("No. Requests: " + load.getNumberOfRequests());
      System.out.println("Total No. Requests: " + load.getTotalNumberOfRequests());
      System.out.println("No. Requests per Sec: " + load.getRequestsPerSecond());
      System.out.println("No. Read Requests: " + load.getReadRequestsCount());
      System.out.println("No. Write Requests: " + load.getWriteRequestsCount());
      System.out.println("No. Stores: " + load.getStores());
      System.out.println("Store Size Uncompressed (MB): " + load.getStoreUncompressedSizeMB());
      System.out.println("No. Storefiles: " + load.getStorefiles());
      System.out.println("Storefile Size (MB): " + load.getStorefileSizeInMB());
      System.out.println("Storefile Index Size (MB): " + load.getStorefileIndexSizeInMB());
      System.out.println("Root Index Size: " + load.getRootIndexSizeKB());
      System.out.println("Total Bloom Size: " + load.getTotalStaticBloomSizeKB());
      System.out.println("Total Index Size: " + load.getTotalStaticIndexSizeKB());
      System.out.println("Current Compacted Cells: " + load.getCurrentCompactedKVs());
      System.out.println("Total Compacting Cells: " + load.getTotalCompactingKVs());
      System.out.println("Coprocessors1: " + Arrays.asList(load.getRegionServerCoprocessors()));
      System.out.println("Coprocessors2: " + Arrays.asList(load.getRsCoprocessors()));
      System.out.println("Replication Load Sink: " + load.getReplicationLoadSink());
      System.out.println("Replication Load Source: " + load.getReplicationLoadSourceList());
      System.out.println("\nRegion Load:\n--------------");
      // Iterate over the region details of the current server.
      for (Map.Entry<byte[], RegionLoad> entry : load.getRegionsLoad().entrySet()) {
        System.out.println("Region: " + Bytes.toStringBinary(entry.getKey()));
        // Get the load details for the current region.
        RegionLoad regionLoad = entry.getValue();
        System.out.println("Name: " + Bytes.toStringBinary(regionLoad.getName()));
        System.out.println("Name (as String): " + regionLoad.getNameAsString());
        System.out.println("No. Requests: " + regionLoad.getRequestsCount());
        System.out.println("No. Read Requests: " + regionLoad.getReadRequestsCount());
        System.out.println("No. Write Requests: " + regionLoad.getWriteRequestsCount());
        System.out.println("No. Stores: " + regionLoad.getStores());
        System.out.println("No. Storefiles: " + regionLoad.getStorefiles());
        System.out.println("Data Locality: " + regionLoad.getDataLocality());
        System.out.println("Storefile Size (MB): " + regionLoad.getStorefileSizeMB());
        System.out.println("Storefile Index Size (MB): " + regionLoad.getStorefileIndexSizeMB());
        System.out.println("Memstore Size (MB): " + regionLoad.getMemStoreSizeMB());
        System.out.println("Root Index Size: " + regionLoad.getRootIndexSizeKB());
        System.out.println("Total Bloom Size: " + regionLoad.getTotalStaticBloomSizeKB());
        System.out.println("Total Index Size: " + regionLoad.getTotalStaticIndexSizeKB());
        System.out.println("Current Compacted Cells: " + regionLoad.getCurrentCompactedKVs());
        System.out.println("Total Compacting Cells: " + regionLoad.getTotalCompactingKVs());
        System.out.println();
      }
    }
  }

  public static class Response {

    public String table = "";
    public List<Request.Column> columns = new ArrayList<>();
    public byte[] key;
  }

  public static class Request {
    public String table = "";
    public byte[] key = new byte[0];
    public List<Column> columns = new ArrayList<>();

    public Optional<Put> getPut() {
      if (valid()) {
        Put p = new Put(key);
        columns.forEach(c -> p.addColumn(c.family.getBytes(), c.qualifier.getBytes(), c.value));
        return Optional.of(p);
      }
      return Optional.empty();
    }

    public boolean valid() {
      return !table.isEmpty()
          && key != null
          && key.length > 0
          && columns.stream().filter(c -> !c.valid()).count() == 0;
    }

    public static class Column {
      public final byte[] value;
      public String family = "";
      public String qualifier = "";

      public Column(String family, String qualifier, byte[] value) {
        this.family = family;
        this.qualifier = qualifier;
        this.value = Arrays.copyOf(value, value.length);
      }

      public boolean valid() {
        return !family.isEmpty() && !qualifier.isEmpty() && value != null;
      }
    }
  }
}
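A minimal usage sketch for the helper class above, without Kerberos. The quorum, table, family, qualifier, and row key are illustrative, and it assumes the target table already exists (or was created through createTable).

import java.util.Optional;

public class HBaseUtilsDemo {
  public static void main(String[] args) throws Exception {
    // Plain (non-Kerberos) connection; the keytab-related arguments are unused in this mode.
    HBaseUtils hbase =
        new HBaseUtils("zk1.example.com,zk2.example.com", false, null, null, null, 1);

    // Write one cell through the Request/Put path.
    HBaseUtils.Request put = new HBaseUtils.Request();
    put.table = "demo_table";
    put.key = "row-1".getBytes();
    put.columns.add(new HBaseUtils.Request.Column("d", "payload", "hello".getBytes()));
    hbase.put(Optional.of(put));

    // Read the same cell back through the string-based getter.
    Optional<byte[]> value =
        hbase.get(
            Optional.of("demo_table"), Optional.of("d"), Optional.of("payload"), Optional.of("row-1"));
    value.ifPresent(v -> System.out.println(new String(v)));
  }
}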
Example #4
  public int run(String[] args) throws Exception {
    // printUsage();
    /*
     * SETUP
     */
    Configuration argConf = getConf();
    Hashtable<String, String> confArg = new Hashtable<String, String>();
    setup(confArg, argConf);
    Date currentTime = new Date();
    Date endDate = new Date(Long.parseLong(confArg.get("timestamp_stop")));
    Boolean full_run = confArg.get("intermediate").matches("(?i).*true.*");
    Boolean quick_add = confArg.get("quick_add").matches("(?i).*true.*");
    logger.info("Running GeStore");

    // ZooKeeper setup
    Configuration config = HBaseConfiguration.create();
    zkWatcher = new ZooKeeperWatcher(config, "Testing", new HBaseAdmin(config));
    zkInstance =
        new ZooKeeper(
            ZKConfig.getZKQuorumServersString(config),
            config.getInt("zookeeper.session.timeout", -1),
            zkWatcher);

    if (!confArg.get("task_id").isEmpty()) {
      confArg.put("temp_path", confArg.get("temp_path") + confArg.get("task_id"));
    }

    String lockRequest = confArg.get("file_id");
    if (!confArg.get("run_id").isEmpty())
      lockRequest = lockRequest + "_" + confArg.get("run_id") + "_";
    if (!confArg.get("task_id").isEmpty())
      lockRequest = lockRequest + "_" + confArg.get("task_id") + "_";

    // Get type of movement
    toFrom type_move = checkArgs(confArg);
    if (type_move == toFrom.LOCAL2REMOTE && !confArg.get("format").equals("unknown")) {
      List<String> arguments = new ArrayList<String>();
      arguments.add("-Dinput=" + confArg.get("local_path"));
      arguments.add("-Dtable=" + confArg.get("file_id"));
      arguments.add("-Dtimestamp=" + confArg.get("timestamp_stop"));
      arguments.add("-Dtype=" + confArg.get("format"));
      arguments.add("-Dtarget_dir=" + confArg.get("base_path") + "_" + confArg.get("file_id"));
      arguments.add("-Dtemp_hdfs_path=" + confArg.get("temp_path"));
      arguments.add("-Drun_id=" + confArg.get("run_id"));
      if (!confArg.get("run_id").isEmpty()) arguments.add("-Drun_id=" + confArg.get("run_id"));
      if (!confArg.get("task_id").isEmpty()) arguments.add("-Dtask_id=" + confArg.get("task_id"));
      if (quick_add) arguments.add("-Dquick_add=" + confArg.get("quick_add"));
      String lockName = lock(lockRequest);
      String[] argumentString = arguments.toArray(new String[arguments.size()]);
      adddb.main(argumentString);
      unlock(lockName);
      System.exit(0);
    }

    // Database registration

    dbutil db_util = new dbutil(config);
    db_util.register_database(confArg.get("db_name_files"), true);
    db_util.register_database(confArg.get("db_name_runs"), true);
    db_util.register_database(confArg.get("db_name_updates"), true);
    FileSystem hdfs = FileSystem.get(config);
    FileSystem localFS = FileSystem.getLocal(config);

    // Get source type
    confArg.put("source", getSource(db_util, confArg.get("db_name_files"), confArg.get("file_id")));
    confArg.put(
        "database", isDatabase(db_util, confArg.get("db_name_files"), confArg.get("file_id")));
    if (!confArg.get("source").equals("local")
        && type_move == toFrom.REMOTE2LOCAL
        && !confArg.get("timestamp_stop").equals(Integer.toString(Integer.MAX_VALUE))) {
      confArg.put("timestamp_stop", Long.toString(latestVersion(confArg, db_util)));
    }

    /*
     * Get previous timestamp
     */
    Get run_id_get = new Get(confArg.get("run_id").getBytes());
    Result run_get = db_util.doGet(confArg.get("db_name_runs"), run_id_get);
    KeyValue run_file_prev =
        run_get.getColumnLatest(
            "d".getBytes(), (confArg.get("file_id") + "_db_timestamp").getBytes());
    String last_timestamp = "0";
    if (null != run_file_prev && !confArg.get("source").equals("local")) {
      long last_timestamp_real = run_file_prev.getTimestamp();
      Long current_timestamp = new Long(confArg.get("timestamp_real"));
      if ((current_timestamp - last_timestamp_real) > 36000) {
        last_timestamp = new String(run_file_prev.getValue());
        Integer lastTimestamp = new Integer(last_timestamp);
        lastTimestamp += 1;
        last_timestamp = lastTimestamp.toString();
        logger.info("Last timestamp: " + last_timestamp + " End data: " + endDate);
        Date last_run = new Date(run_file_prev.getTimestamp());
        if (last_run.before(endDate) && !full_run) {
          confArg.put("timestamp_start", last_timestamp);
        }
      }
    }

    Integer tse = new Integer(confArg.get("timestamp_stop"));
    Integer tss = new Integer(confArg.get("timestamp_start"));
    if (tss > tse) {
      logger.info("No new version of requested file.");
      return 0;
    }

    /*
     * Generate file
     */

    String lockName = lock(lockRequest);

    Get file_id_get = new Get(confArg.get("file_id").getBytes());
    Result file_get = db_util.doGet(confArg.get("db_name_files"), file_id_get);
    if (!file_get.isEmpty()) {
      boolean found =
          hasFile(
              db_util,
              hdfs,
              confArg.get("db_name_files"),
              confArg.get("file_id"),
              getFullPath(confArg));
      if (confArg.get("source").equals("fullfile")) {
        found = false;
      }
      String filenames_put =
          getFileNames(
              db_util, confArg.get("db_name_files"), confArg.get("file_id"), getFullPath(confArg));
      // Filename not found in file database
      if (!found && type_move == toFrom.REMOTE2LOCAL) {
        if (!confArg.get("source").equals("local")) {
          // Generate intermediate file
          if (getFile(hdfs, confArg, db_util) == null) {
            unlock(lockName);
            return 1;
          }
          // Put generated file into file database
          if (!confArg.get("format").equals("fullfile")) {
            putFileEntry(
                db_util,
                hdfs,
                confArg.get("db_name_files"),
                confArg.get("file_id"),
                confArg.get("full_file_name"),
                confArg.get("source"));
          }
        } else {
          logger.warn("Remote file not found, and cannot be generated! File: " + confArg);
          unlock(lockName);
          return 1;
        }
      }
    } else {
      if (type_move == toFrom.REMOTE2LOCAL) {
        logger.warn("Remote file not found, and cannot be generated.");
        unlock(lockName);
        return 1;
      }
    }

    /*
     * Copy file
     * Update tables
     */

    if (type_move == toFrom.LOCAL2REMOTE) {
      if (!confArg.get("format").equals("fullfile")) {
        putFileEntry(
            db_util,
            hdfs,
            confArg.get("db_name_files"),
            confArg.get("file_id"),
            getFullPath(confArg),
            confArg.get("source"));
      }
      putRunEntry(
          db_util,
          confArg.get("db_name_runs"),
          confArg.get("run_id"),
          confArg.get("file_id"),
          confArg.get("type"),
          confArg.get("timestamp_real"),
          confArg.get("timestamp_stop"),
          getFullPath(confArg),
          confArg.get("delimiter"));
      hdfs.copyFromLocalFile(new Path(confArg.get("local_path")), new Path(getFullPath(confArg)));
    } else if (type_move == toFrom.REMOTE2LOCAL) {
      FileStatus[] files = hdfs.globStatus(new Path(getFullPath(confArg) + "*"));
      putRunEntry(
          db_util,
          confArg.get("db_name_runs"),
          confArg.get("run_id"),
          confArg.get("file_id"),
          confArg.get("type"),
          confArg.get("timestamp_real"),
          confArg.get("timestamp_stop"),
          getFullPath(confArg),
          confArg.get("delimiter"));
      unlock(lockName);
      for (FileStatus file : files) {
        Path cur_file = file.getPath();
        Path cur_local_path = new Path(confArg.get("local_path") + confArg.get("file_id"));
        String suffix = getSuffix(getFileName(confArg), cur_file.getName());
        if (suffix.length() > 0) {
          cur_local_path = cur_local_path.suffix("." + suffix);
        }
        if (confArg.get("copy").equals("true")) {
          String crc = hdfs.getFileChecksum(cur_file).toString();
          if (checksumLocalTest(cur_local_path, crc)) {
            continue;
          } else {
            hdfs.copyToLocalFile(cur_file, cur_local_path);
            writeChecksum(cur_local_path, crc);
          }
        } else {
          System.out.println(cur_local_path + "\t" + cur_file);
        }
      }
    }
    unlock(lockName);
    return 0;
  }
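run() reads its parameters (file_id, run_id, task_id, timestamp_start, timestamp_stop, format, copy, local_path, and so on) from the Configuration returned by getConf(), which setup() copies into confArg. Below is a hypothetical invocation sketch, assuming the enclosing class implements org.apache.hadoop.util.Tool; the driver class, the GeStoreMove name, and all property values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class GeStoreMoveDriver {
  public static void main(String[] args) throws Exception {
    // Illustrative -D properties matching the confArg keys read in run().
    String[] moveArgs = {
      "-Dfile_id=uniprot_sprot",
      "-Drun_id=42",
      "-Dtimestamp_start=0",
      "-Dtimestamp_stop=" + Integer.MAX_VALUE,
      "-Dformat=unknown",
      "-Dcopy=true",
      "-Dlocal_path=/tmp/gestore/"
    };
    // GeStoreMove stands in for the Tool subclass that contains the run() method above.
    // ToolRunner parses the generic -D options into the Configuration seen by getConf().
    int rc = ToolRunner.run(new Configuration(), new GeStoreMove(), moveArgs);
    System.exit(rc);
  }
}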