Example #1
  public void storeTravelModeChoiceOutput(ArrayList<String[]> travModeChoice, int inputYear) {
    String tableName = "travelmode_choice_output_" + inputYear;
    String sql =
        "INSERT INTO "
            + tableName
            + " (individual_id, hhold_id, travel_mode, purpose, origin, destination, old_travel_mode) VALUES (?,?,?,?,?,?,?);";

    try {
      List<List<String[]>> travModeBatches = Lists.partition(travModeChoice, BATCH_SIZE);

      for (final List<String[]> travModeBatch : travModeBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < travModeBatch.get(i).length; j++) {
                  ps.setString(j + 1, travModeBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return travModeBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert travel mode choice output.", e);
    }
  }
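Examples #4–#6, #8, #9, #16, #18, and #20 below repeat this exact shape: partition the accumulated String[] rows with Lists.partition, then hand each chunk to jdbcTemplate.batchUpdate. A minimal consolidation sketch (the helper name batchInsertStrings is hypothetical, and it assumes the same jdbcTemplate and BATCH_SIZE fields as above):

  private void batchInsertStrings(String sql, List<String[]> rows) {
    // Chunk the rows so no single JDBC batch grows unbounded.
    for (final List<String[]> batch : Lists.partition(rows, BATCH_SIZE)) {
      jdbcTemplate.batchUpdate(
          sql,
          new BatchPreparedStatementSetter() {
            @Override
            public void setValues(PreparedStatement ps, int i) throws SQLException {
              String[] row = batch.get(i);
              for (int j = 0; j < row.length; j++) {
                ps.setString(j + 1, row[j]); // JDBC parameters are 1-indexed
              }
            }

            @Override
            public int getBatchSize() {
              return batch.size();
            }
          });
    }
  }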
Example #2
 @RequestMapping(value = "/api/group", method = RequestMethod.POST, produces = "application/json")
 public List<BroadcastGroup> createGroup(
     @RequestParam("name") String name, @RequestParam("setSize") Integer setSize) {
   if (broadcastRepository.findOne(name) == null) {
     AtomicInteger counter = new AtomicInteger(0);
     List<String> userIds =
         session
             .getUsers()
             .stream()
             .filter(slackUser -> !slackUser.isBot())
             .map(SlackPersona::getId)
             .collect(Collectors.toList());
     List<BroadcastSet> broadcastSets =
         Lists.partition(userIds, setSize)
             .stream()
             .map(
                 set ->
                     new BroadcastSet(
                         counter.incrementAndGet(), set, new ArrayList<BroadcastMessage>()))
             .collect(Collectors.toList());
     broadcastRepository.save(new BroadcastGroup(name, broadcastSets, new Date()));
   } else {
     log.error("Group with name {} already exist", name);
   }
   return broadcastRepository.findAll();
 }
Example #3
  public static Collection<BatchPoints> create(
      int chunksize, String db, ZeitreihenDTO... zeitreihenDTOs) throws ParseException {
    List<Point> points = new ArrayList<>();
    for (ZeitreihenDTO zeitreihenDTO : zeitreihenDTOs) {
      for (Long timestamp : timestamps(zeitreihenDTO)) {
        GregorianCalendar calendar = new GregorianCalendar();
        calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
        calendar.setTimeInMillis(timestamp);
        int monat = calendar.get(Calendar.MONTH) + 1;
        int jahr = calendar.get(Calendar.YEAR);

        points.add(
            Point.measurement("lastgang")
                .tag("zaehlpunktbezeichnung", zeitreihenDTO.zaehlpunktbezeichnung)
                .tag("commodity", zeitreihenDTO.commodity)
                .tag("zaehlverfahren", zeitreihenDTO.zaehlverfahren)
                .tag("monat", String.valueOf(monat))
                .tag("jahr", String.valueOf(jahr))
                .time(timestamp, TimeUnit.MILLISECONDS)
                .field("value", randomValue())
                .build());
      }
    }

    Collection<BatchPoints> batchPoints = new ArrayList<>();
    Lists.partition(points, chunksize)
        .forEach(
            chunk -> {
              BatchPoints bp = BatchPoints.database(db).retentionPolicy("default").build();

              chunk.forEach(bp::point);
              batchPoints.add(bp);
            });
    return batchPoints;
  }
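A hedged usage sketch for the factory above, assuming the influxdb-java client; the URL, credentials, database name, and someZeitreihenDTO are placeholders, not from the original source:

  // Connect and write each chunk; note that create(...) declares throws ParseException.
  InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "pass");
  for (BatchPoints bp : create(5000, "mydb", someZeitreihenDTO)) {
    influxDB.write(bp); // one write per chunk of at most 5000 points
  }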
Example #4
  private void storeNewborns() {
    String sql =
        "INSERT INTO test_newborns_" + year + " (indiv_id, gender, hhold_id) VALUES (?, ?, ?);";

    try {

      List<List<String[]>> newbornsBatches = Lists.partition(newborns, BATCH_SIZE);

      for (final List<String[]> newbornsBatch : newbornsBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < newbornsBatch.get(i).length; j++) {
                  ps.setString(j + 1, newbornsBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return newbornsBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert newborns output.", e);
    } finally {
      // Clear data structure
      newborns.clear();
    }
  }
Example #5
  private void storeLinkDelayOutput() {
    String sql =
        "INSERT INTO transims_output_linkdelay_"
            + year
            + " (link, dir, depart, arrive, flow, time) VALUES (?, ?, ?, ?, ?, ?);";

    try {
      List<List<String[]>> delayOutputArrayListBatches =
          Lists.partition(linkDelayOutputArrayList, BATCH_SIZE);

      for (final List<String[]> delayOutputArrayListBatch : delayOutputArrayListBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < delayOutputArrayListBatch.get(i).length; j++) {
                  ps.setString(j + 1, delayOutputArrayListBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return delayOutputArrayListBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert link delay output.", e);
    } finally {
      // clear data structure
      linkDelayOutputArrayList.clear();
    }
  }
Example #6
  private void storeTimePlanOutput() {
    String sql =
        "INSERT INTO transims_output_timeplans_"
            + year
            + " (household, person, trip, mode, depart, purpose,trip_length) VALUES (?, ?, ?, ?, ?, ?, ?);";

    try {
      List<List<String[]>> timeplanOutputArrayListBatches =
          Lists.partition(timeplanOutputArrayList, BATCH_SIZE);

      for (final List<String[]> timeplanOutputArrayListBatch : timeplanOutputArrayListBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < timeplanOutputArrayListBatch.get(i).length; j++) {
                  ps.setString(j + 1, timeplanOutputArrayListBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return timeplanOutputArrayListBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert time plan output.", e);
    } finally {
      // clear data structure
      timeplanOutputArrayList.clear();
    }
  }
Example #7
  /**
   * Updates satisfaction for each travel zone: first queries the synthetic_population_output
   * table for the same year, then writes the result into the travel_zone_output table.
   */
  public void storeSatisfaction() {

    String sqlGetData =
        "select tz_2006 as travelZone,avg(satisfaction) as averageSatisfaction from synthetic_population_output_"
            + this.year
            + " group by tz_2006;";
    String sqlSetData =
        "update travel_zone_output_" + this.year + " set satisfaction = ? where tz_2006 =? ;";

    try {
      List<List<Map<String, Object>>> satisfactionBatches =
          Lists.partition(jdbcTemplate.queryForList(sqlGetData), BATCH_SIZE);

      for (final List<Map<String, Object>> satisfactionBatch : satisfactionBatches) {
        jdbcTemplate.batchUpdate(
            sqlSetData,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                Map<String, Object> result = satisfactionBatch.get(i);
                ps.setDouble(1, (Double) result.get("averageSatisfaction"));
                ps.setInt(2, (Integer) result.get("travelZone"));
              }

              @Override
              public int getBatchSize() {
                return satisfactionBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to store satisfaction", e);
    }
  }
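One hedged caveat about the setter above: depending on the JDBC driver, avg(satisfaction) can come back as a BigDecimal rather than a Double, and tz_2006 as a Long rather than an Integer, in which case the casts throw ClassCastException. Reading through Number sidesteps that assumption (a sketch, not the original code):

                Map<String, Object> result = satisfactionBatch.get(i);
                // Number covers Double, BigDecimal, Integer, Long, etc.
                ps.setDouble(1, ((Number) result.get("averageSatisfaction")).doubleValue());
                ps.setInt(2, ((Number) result.get("travelZone")).intValue());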
Example #8
  private void storeEmigrants() {
    String sql =
        "INSERT INTO test_emigrants_"
            + year
            + " (indiv_id, age, gender, hhold_id, hhold_type, nbr_needed, hhold_income, ownership, year_dwelling_bought) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);";

    try {

      List<List<String[]>> emigrantBatches = Lists.partition(testEmigrants, BATCH_SIZE);

      for (final List<String[]> emigrantBatch : emigrantBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < emigrantBatch.get(i).length; j++) {
                  ps.setString(j + 1, emigrantBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return emigrantBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert emigrants output.", e);
    } finally {
      // clear data structure
      testEmigrants.clear();
    }
  }
Example #9
  private void storeDeadPeople() {

    String sql =
        "INSERT INTO test_deadpeople_"
            + year
            + " (indiv_id, age, hhold_id, hhold_relationship, gender) VALUES (?, ?, ?, ?, ?);";

    try {

      List<List<String[]>> deadPeopleBatches = Lists.partition(deadPeople, BATCH_SIZE);

      for (final List<String[]> deadPeopleBatch : deadPeopleBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < deadPeopleBatch.get(i).length; j++) {
                  ps.setString(j + 1, deadPeopleBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return deadPeopleBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert deadpeople output.", e);
    } finally {
      // Clear data structure
      deadPeople.clear();
    }
  }
Example #10
 /**
  * Abort jobs for a given list of job {@link UUID}s. If the size of the list is larger than the
  * {@code _batchRequestSize}, it will partition the list into smaller lists and abort each
  * separately.
  *
  * @param uuids specifies a list of job {@link UUID}s expected to abort.
  * @throws JobClientException
  */
 public void abort(Collection<UUID> uuids) throws JobClientException {
   final List<NameValuePair> allParams = new ArrayList<NameValuePair>(uuids.size());
   for (UUID uuid : uuids) {
     allParams.add(new BasicNameValuePair("job", uuid.toString()));
   }
   // Partition a large query into small queries.
   for (final List<NameValuePair> params : Lists.partition(allParams, _batchRequestSize)) {
     HttpRequestBase httpRequest;
     try {
       URIBuilder uriBuilder = new URIBuilder(_uri);
       uriBuilder.addParameters(params);
       httpRequest = new HttpDelete(uriBuilder.build());
     } catch (URISyntaxException e) {
       throw releaseAndCreateException(
           null, "Can not submit DELETE request " + params + " via uri " + _uri, e);
     }
     HttpResponse httpResponse;
     try {
       httpResponse = _httpClient.execute(httpRequest);
     } catch (IOException e) {
       throw releaseAndCreateException(
           httpRequest, "Can not submit DELETE request " + params + " via uri " + _uri, e);
     }
     // Check status code.
     final StatusLine statusLine = httpResponse.getStatusLine();
     // Based on the decision graph
     // http://clojure-liberator.github.io/liberator/tutorial/decision-graph.html
     // If jobs are aborted successfully, the returned status code is 204.
     if (statusLine.getStatusCode() != HttpStatus.SC_NO_CONTENT) {
       throw releaseAndCreateException(
           httpRequest,
           "The response of DELETE request "
               + params
               + " via uri "
               + _uri
               + ": "
               + statusLine.getReasonPhrase()
               + ", "
               + statusLine.getStatusCode(),
           null);
     }
     // Parse the response.
     try {
       // Parse the response to string.
       final HttpEntity entity = httpResponse.getEntity();
       if (null != entity) {
         final String response = EntityUtils.toString(entity);
         if (_log.isDebugEnabled()) {
           _log.debug("Response String for aborting jobs " + uuids + " is " + response);
         }
       }
     } catch (ParseException | IOException e) {
       throw new JobClientException(
           "Can not parse the response for DELETE request " + params + " via uri " + _uri, e);
     } finally {
       httpRequest.releaseConnection();
     }
   }
 }
Example #11
  @Test
  public void whenPartitionList_thenPartitioned() {
    final List<String> names = Lists.newArrayList("John", "Jane", "Adam", "Tom", "Viki", "Tyler");
    final List<List<String>> result = Lists.partition(names, 2);

    assertEquals(3, result.size());
    assertThat(result.get(0), contains("John", "Jane"));
    assertThat(result.get(1), contains("Adam", "Tom"));
    assertThat(result.get(2), contains("Viki", "Tyler"));
  }
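Per Guava's documented behavior, when the list size does not divide evenly the final sublist simply holds the remainder; a hypothetical companion test in the same style:

  @Test
  public void whenPartitionUnevenList_thenLastSublistIsSmaller() {
    final List<String> names = Lists.newArrayList("John", "Jane", "Adam", "Tom", "Viki");
    final List<List<String>> result = Lists.partition(names, 2);

    assertEquals(3, result.size());
    assertThat(result.get(2), contains("Viki")); // the last chunk is smaller
  }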
Example #12
 @Override
 protected void createPartitionSublists() {
   List<String> fileLocations =
       ((FormatSelection) scanRel.getDrillTable().getSelection()).getAsFiles();
   List<PartitionLocation> locations = new LinkedList<>();
   for (String file : fileLocations) {
     locations.add(new DFSPartitionLocation(MAX_NESTED_SUBDIRS, getBaseTableLocation(), file));
   }
   locationSuperList = Lists.partition(locations, PartitionDescriptor.PARTITION_BATCH_SIZE);
   sublistsCreated = true;
 }
Example #13
  @Override
  @Transactional
  public StageResult stage(List<MotleyObject> records, String templateDB, String templateTable) {
    String stageTableName = StageUtils.getStageTableName(templateTable);
    String createTableDDL =
        "SELECT date, name, id, price, amount, fx_rate, is_valid, knowledge_time INTO "
            + stageTableName
            + " FROM "
            + templateDB
            + ".."
            + templateTable;
    getJdbcTemplate().update(createTableDDL);

    String insertDML =
        "INSERT INTO "
            + stageTableName
            + "(date, name, id, price, amount, fx_rate, is_valid, knowledge_time) "
            + " VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
    List<List<MotleyObject>> partitions = Lists.partition(records, batchSize);
    for (final List<MotleyObject> partition : partitions) {
      BatchPreparedStatementSetter pss =
          new BatchPreparedStatementSetter() {

            @Override
            public void setValues(PreparedStatement ps, int i) throws SQLException {
              MotleyObject rec = partition.get(i);
              ps.setDate(1, new Date(rec.getDate().getTime()));
              ps.setString(2, rec.getName());
              ps.setInt(3, rec.getId());
              ps.setBigDecimal(4, rec.getPrice());
              ps.setBigDecimal(5, rec.getAmount());
              ps.setBigDecimal(6, rec.getFxRate());
              ps.setBoolean(7, rec.getIsValid());
              ps.setDate(8, new Date(rec.getKnowledgeTime().getTime()));
            }

            @Override
            public int getBatchSize() {
              return partition.size();
            }
          };
      getJdbcTemplate().batchUpdate(insertDML, pss);
    }

    StageResult result = new StageResult();
    result.setDbName(templateDB);
    result.setTableName(stageTableName);
    return result;
  }
Example #14
  @Test
  public void testRateLimiter() throws Exception {
    RateLimiter limiter = RateLimiter.create(500);
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
      limiter.acquire();
      System.out.println("current is " + i);
    }
    long endTime = System.currentTimeMillis();
    System.out.println(endTime - startTime);

    ArrayList<String> arrayList = Lists.newArrayList("1", "2", "3", "4", "5");
    List<List<String>> partition = Lists.partition(arrayList, 2);
    System.out.println(partition.size());
  }
Example #15
  /**
   * Gets the list of documents for a given list of numeric ids. The result list is ordered by
   * sequence number, and only the current revisions are returned.
   *
   * @param docIds given list of internal ids
   * @return list of documents ordered by sequence number
   */
  List<DocumentRevision> getDocumentsWithInternalIds(List<Long> docIds) {
    Preconditions.checkNotNull(docIds, "Input document internal id list cannot be null");
    if (docIds.size() == 0) {
      return Collections.emptyList();
    }

    final String GET_DOCUMENTS_BY_INTERNAL_IDS =
        "SELECT "
            + FULL_DOCUMENT_COLS
            + " FROM revs, docs "
            + "WHERE revs.doc_id IN ( %s ) AND current = 1 AND docs.doc_id = revs.doc_id";

    // Split into batches because SQLite has a limit on the number
    // of placeholders we can use in a single query. 999 is the default
    // value, but it can be lower. It's hard to find this out from Java,
    // so we use a value much lower.
    List<DocumentRevision> result = new ArrayList<DocumentRevision>(docIds.size());

    List<List<Long>> batches = Lists.partition(docIds, SQLITE_QUERY_PLACEHOLDERS_LIMIT);
    for (List<Long> batch : batches) {
      String sql =
          String.format(
              GET_DOCUMENTS_BY_INTERNAL_IDS, SQLDatabaseUtils.makePlaceholders(batch.size()));
      String[] args = new String[batch.size()];
      for (int i = 0; i < batch.size(); i++) {
        args[i] = Long.toString(batch.get(i));
      }
      result.addAll(getRevisionsFromRawQuery(sql, args));
    }

    // Contract is to sort by sequence number, which we need to do
    // outside the sqlDb as we're batching requests.
    Collections.sort(
        result,
        new Comparator<DocumentRevision>() {
          @Override
          public int compare(
              DocumentRevision documentRevision, DocumentRevision documentRevision2) {
            // Long.compare avoids the overflow risk of the original (int) (a - b) cast.
            return Long.compare(documentRevision.getSequence(), documentRevision2.getSequence());
          }
        });

    return result;
  }
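SQLDatabaseUtils.makePlaceholders is not shown above; a minimal hypothetical equivalent would render the comma-separated placeholder list that gets spliced into the IN clause:

  // makePlaceholders(3) -> "?, ?, ?"
  static String makePlaceholders(int count) {
    StringBuilder sb = new StringBuilder("?");
    for (int i = 1; i < count; i++) {
      sb.append(", ?");
    }
    return sb.toString();
  }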
Example #16
  private void storeSPAfterReloc() {
    String sql =
        "INSERT INTO test_sp_after_reloc_"
            + year
            + " (indiv_id, "
            + "age, "
            + "gender, "
            + "indiv_income, "
            + "hhold_relationship, "
            + "hhold_id, "
            + "hhold_type, "
            + "tenure, "
            + "nbr_needed, "
            + "ownership, "
            + "tz, "
            + "nbr_having"
            + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);";

    try {
      List<List<String[]>> spAfterRelocatedBatches = Lists.partition(testSPAfterReloc, BATCH_SIZE);

      for (final List<String[]> spAfterRelocatedBatch : spAfterRelocatedBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < spAfterRelocatedBatch.get(i).length; j++) {
                  ps.setString(j + 1, spAfterRelocatedBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return spAfterRelocatedBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert sp_after_reloc output.", e);
    } finally {
      // clear data structure
      testSPAfterReloc.clear();
    }
  }
Example #17
 public Collection<ComponentDto> selectComponentsByIds(Collection<Long> ids) {
   if (ids.isEmpty()) {
     return Collections.emptyList();
   }
   SqlSession session = mybatis.openSession(false);
   try {
     List<ComponentDto> components = newArrayList();
     List<List<Long>> partitionList = Lists.partition(newArrayList(ids), 1000);
     for (List<Long> partition : partitionList) {
       List<ComponentDto> dtos =
           session.getMapper(ResourceMapper.class).selectComponentsByIds(partition);
       components.addAll(dtos);
     }
     return components;
   } finally {
     MyBatis.closeQuietly(session);
   }
 }
Example #18
  private void storeEquity() {
    String sql =
        "INSERT INTO test_equity_"
            + year
            + " (hhold_id, "
            + "hhold_type, "
            + "hhold_income, "
            + "ownership, "
            + "savings_if_rent, "
            + "year_dwelling_bought, "
            + "equity, "
            + "yearly_mortgage, "
            + "dwelling_price, "
            + "duty_paid"
            + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);";

    try {

      List<List<String[]>> equityBatches = Lists.partition(testEquity, BATCH_SIZE);

      for (final List<String[]> equityBatch : equityBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < equityBatch.get(i).length; j++) {
                  ps.setString(j + 1, equityBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return equityBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert equity output.", e);
    } finally {
      // clear data structure
      testEquity.clear();
    }
  }
Example #19
  public boolean execute(ICommandSender sender, String[] args) {
    IPlayer player = (IPlayer) sender;
    if (player.hasPermission(PermissionNames.ADMIN_EXPIRED)) {
      if (manager.isPlotWorld(player.getWorld())) {
        PlotMapInfo pmi = manager.getMap(player);
        if (pmi.getDaysToExpiration() != 0) {
          int page = 1;

          if (args.length == 2) {
            page = Integer.parseInt(args[1]);
          }
          List<List<Plot>> partition =
              Lists.partition(plugin.getSqlManager().getExpiredPlots(player.getWorld()), 10);
          if (partition.isEmpty()) {
            player.sendMessage(C("MsgNoPlotExpired"));
          } else {
            player.sendMessage(C("MsgExpiredPlotsPage", page, partition.size()));
            for (Plot plot : partition.get(page - 1)) {
              assert plot.getExpiredDate() != null;
              player.sendMessage(
                  plot.getId()
                      + " -> "
                      + plot.getOwner()
                      + " @ "
                      + plot.getExpiredDate().toString());
            }
          }
        } else {
          return true;
        }
      } else {
        player.sendMessage(C("NotPlotWorld"));
        return true;
      }
    } else {
      return false;
    }
    return true;
  }
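A hedged hardening note for the command above: page comes straight from Integer.parseInt(args[1]), so partition.get(page - 1) can throw IndexOutOfBoundsException for an out-of-range page (and parseInt itself can throw NumberFormatException). A clamp such as this hypothetical line, placed before the lookup, keeps it in range:

          // Clamp the requested page into [1, partition.size()].
          page = Math.max(1, Math.min(page, partition.size()));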
Example #20
  private void storeHholdNotReloc() {
    String sql =
        "INSERT INTO test_notreloc_hhold_"
            + year
            + " (hhold_id, "
            + "nbr_needed, "
            + "nbr_having"
            + ") VALUES (?, ?, ?);";

    try {

      List<List<String[]>> notRelocatedHholdBatches =
          Lists.partition(testNotRelocHhold, BATCH_SIZE);

      for (final List<String[]> notRelocatedHholdBatch : notRelocatedHholdBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                for (int j = 0; j < notRelocatedHholdBatch.get(i).length; j++) {
                  ps.setString(j + 1, notRelocatedHholdBatch.get(i)[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return notRelocatedHholdBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to insert notreloc_hhold output.", e);
    } finally {
      // clear data structure
      testNotRelocHhold.clear();
    }
  }
Example #21
  public void storeOccupancyCounts(Map<Integer, int[]> occupancyRate, int year) {

    String sql =
        "INSERT INTO dwelling_occupancy_counts_"
            + year
            + "( tz, _1bd, _2bd, _3bd, _4bd) VALUES (?, ?, ?, ?, ?);";

    try {
      List<Entry<Integer, int[]>> occupancyRateSet = new ArrayList<>();
      occupancyRateSet.addAll(occupancyRate.entrySet());

      List<List<Entry<Integer, int[]>>> occupancyBatches =
          Lists.partition(occupancyRateSet, BATCH_SIZE);

      for (final List<Entry<Integer, int[]>> occupancyBatch : occupancyBatches) {
        jdbcTemplate.batchUpdate(
            sql,
            new BatchPreparedStatementSetter() {
              @Override
              public void setValues(PreparedStatement ps, int i) throws SQLException {
                Map.Entry<Integer, int[]> occupancy = occupancyBatch.get(i);
                ps.setInt(1, occupancy.getKey());
                for (int j = 0; j < occupancy.getValue().length; j++) {
                  ps.setInt(j + 2, occupancy.getValue()[j]);
                }
              }

              @Override
              public int getBatchSize() {
                return occupancyBatch.size();
              }
            });
      }
    } catch (DataAccessException e) {
      logger.error("Failed to update occupancy counts.", e);
    }
  }
Example #22
  public Collection<Component> findByIds(Collection<Long> ids) {
    if (ids.isEmpty()) {
      return Collections.emptyList();
    }
    SqlSession session = mybatis.openSession();
    try {

      List<ResourceDto> resources = newArrayList();
      List<List<Long>> idsPartition = Lists.partition(newArrayList(ids), 1000);
      for (List<Long> partition : idsPartition) {
        List<ResourceDto> dtos =
            session.getMapper(ResourceMapper.class).selectResourcesById(partition);
        resources.addAll(dtos);
      }

      Collection<Component> components = newArrayList();
      for (ResourceDto resourceDto : resources) {
        components.add(toComponent(resourceDto));
      }
      return components;
    } finally {
      MyBatis.closeQuietly(session);
    }
  }
Example #23
 public List<List<FloatDataSet>> batchBy(int num) {
   return Lists.partition(asList(), num);
 }
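Guava documents Lists.partition as returning views backed by the source list, so if asList() exposes a live list, later mutations show through in the returned batches. A hypothetical copying variant for callers that need a stable snapshot:

 public List<List<FloatDataSet>> batchByCopy(int num) {
   List<List<FloatDataSet>> copies = new ArrayList<>();
   for (List<FloatDataSet> batch : Lists.partition(asList(), num)) {
     copies.add(new ArrayList<>(batch)); // detach from the backing list
   }
   return copies;
 }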
Example #24
 /**
  * Sorts the dataset by label: splits the data set such that examples are grouped by their
  * labels. A ten-label dataset would produce lists with batches like the following:
  * x1 y = 1, x2 y = 2, ..., x10 y = 10
  *
  * @return a list of data sets partitioned by outcomes
  */
 public List<List<FloatDataSet>> sortAndBatchByNumLabels() {
   sortByLabel();
   return Lists.partition(asList(), numOutcomes());
 }
Example #25
 public List<List<FloatDataSet>> batchByNumLabels() {
   return Lists.partition(asList(), numOutcomes());
 }
Example #26
  /**
   * Adds the last-attachment information to the resources. To be compatible with the AWS
   * implementation of the same crawler, the information is added to the JANITOR_META tag. The tag
   * in each resource is always updated with the latest information (without writing back to AWS),
   * whether or not the tag already exists.
   *
   * @param resources the volume resources
   */
  private void addLastAttachmentInfo(List<Resource> resources) {
    Validate.notNull(resources);
    LOGGER.info(
        String.format("Updating the latest attachment info for %d resources", resources.size()));
    Map<String, List<Resource>> regionToResources = Maps.newHashMap();
    for (Resource resource : resources) {
      List<Resource> regionalList = regionToResources.get(resource.getRegion());
      if (regionalList == null) {
        regionalList = Lists.newArrayList();
        regionToResources.put(resource.getRegion(), regionalList);
      }
      regionalList.add(resource);
    }
    for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
      LOGGER.info(
          String.format(
              "Updating the latest attachment info for %d resources in region %s",
              entry.getValue().size(), entry.getKey()));
      for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
        LOGGER.info(String.format("Processing batch of size %d", batch.size()));
        String batchUrl = getBatchUrl(entry.getKey(), batch);
        JsonNode batchResult = null;
        try {
          batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
        } catch (IOException e) {
          LOGGER.error("Failed to get response for the batch.", e);
        }
        Map<String, Resource> idToResource = Maps.newHashMap();
        for (Resource resource : batch) {
          idToResource.put(resource.getId(), resource);
        }
        if (batchResult == null || !batchResult.isArray()) {
          throw new RuntimeException(
              String.format(
                  "Failed to get valid document from %s, got: %s", batchUrl, batchResult));
        }

        Set<String> processedIds = Sets.newHashSet();
        for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext(); ) {
          JsonNode elem = it.next();
          JsonNode data = elem.get("data");
          String volumeId = data.get("volumeId").getTextValue();
          Resource resource = idToResource.get(volumeId);
          JsonNode attachments = data.get("attachments");

          Validate.isTrue(attachments.isArray() && attachments.size() > 0);
          JsonNode attachment = attachments.get(0);

          JsonNode ltime = elem.get("ltime");
          if (ltime == null || ltime.isNull()) {
            continue;
          }
          DateTime detachTime = new DateTime(ltime.asLong());
          processedIds.add(volumeId);
          setAttachmentInfo(volumeId, attachment, detachTime, resource);
        }

        for (Map.Entry<String, Resource> volumeEntry : idToResource.entrySet()) {
          String id = volumeEntry.getKey();
          if (!processedIds.contains(id)) {
            Resource resource = volumeEntry.getValue();
            LOGGER.info(
                String.format(
                    "Volume %s never was attached, use createTime %s as the detachTime",
                    id, resource.getLaunchTime()));
            setAttachmentInfo(id, null, new DateTime(resource.getLaunchTime().getTime()), resource);
          }
        }
      }
    }
  }
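The region-grouping loop at the top of addLastAttachmentInfo predates Java 8; a behavior-preserving alternative sketch using Map.computeIfAbsent:

    for (Resource resource : resources) {
      regionToResources
          .computeIfAbsent(resource.getRegion(), k -> Lists.newArrayList())
          .add(resource);
    }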
Example #27
 /**
  * Query jobs for a given list of job {@link UUID}s. If the list size is larger than the {@code
  * _batchRequestSize}, it will partition the list into smaller lists, query each separately, and
  * return all the results together.
  *
  * @param uuids specifies a list of job {@link UUID}s expected to query.
  * @return a {@link ImmutableMap} from job {@link UUID} to {@link Job}.
  * @throws JobClientException
  */
 public ImmutableMap<UUID, Job> query(Collection<UUID> uuids) throws JobClientException {
   final List<NameValuePair> allParams = new ArrayList<NameValuePair>(uuids.size());
   for (UUID uuid : uuids) {
     allParams.add(new BasicNameValuePair("job", uuid.toString()));
   }
   final ImmutableMap.Builder<UUID, Job> UUIDToJob = ImmutableMap.builder();
   // Partition a large query into small queries.
   for (final List<NameValuePair> params : Lists.partition(allParams, _batchRequestSize)) {
     HttpResponse httpResponse;
     HttpRequestBase httpRequest;
     try {
       URIBuilder uriBuilder = new URIBuilder(_uri);
       uriBuilder.addParameters(params);
       httpRequest = new HttpGet(uriBuilder.build());
       httpResponse = _httpClient.execute(httpRequest);
     } catch (IOException | URISyntaxException e) {
       throw releaseAndCreateException(
           null, "Can not submit GET request " + params + " via uri " + _uri, e);
     }
     // Check status code.
     final StatusLine statusLine = httpResponse.getStatusLine();
     // Based on the decision graph
     // http://clojure-liberator.github.io/liberator/tutorial/decision-graph.html
     // The status code for the proper GET response is 200.
     if (statusLine.getStatusCode() != HttpStatus.SC_OK) {
       throw releaseAndCreateException(
           httpRequest,
           "The response of GET request "
               + params
               + " via uri "
               + _uri
               + ": "
               + statusLine.getReasonPhrase()
               + ", "
               + statusLine.getStatusCode(),
           null);
     }
     // Parse the response.
     String response = null;
     try {
       // parse the response to string.
       final HttpEntity entity = httpResponse.getEntity();
       response = EntityUtils.toString(entity);
       // Ensure that the entity content has been fully consumed and the underlying stream has been
       // closed.
       EntityUtils.consume(entity);
       for (Job job : Job.parseFromJSON(response, _instanceDecorator)) {
         UUIDToJob.put(job.getUUID(), job);
       }
     } catch (JSONException | ParseException | IOException e) {
       throw new JobClientException(
           "Can not parse the response = "
               + response
               + " for GET request "
               + params
               + " via uri "
               + _uri,
           e);
     } finally {
       httpRequest.releaseConnection();
     }
   }
   return UUIDToJob.build();
 }
Example #28
 /**
  * Partitions the data set by the specified number.
  *
  * @param num the number to split by
  * @return the partitioned data set
  */
 public List<FloatDataSet> dataSetBatches(int num) {
   List<List<FloatDataSet>> list = Lists.partition(asList(), num);
   List<FloatDataSet> ret = new ArrayList<>();
   for (List<FloatDataSet> l : list) ret.add(FloatDataSet.merge(l));
   return ret;
 }
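A short usage sketch (the minibatch size 32 is a placeholder): each element of the returned list is the merge of one partition chunk, so a dataset of n examples yields ceil(n / 32) merged batches.

 List<FloatDataSet> minibatches = dataSetBatches(32);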