/**
 * Breaks the integer n into at least two positive integers and returns the
 * maximum product obtainable, via bottom-up dynamic programming.
 *
 * <p>best[k] holds the best product for breaking k; a summand may either be
 * kept whole (value {@code part}) or broken further (value {@code best[part]}),
 * whichever is larger.
 */
int integerBreak(int n) {
  final int[] best = new int[n + 1];
  best[1] = 1;
  for (int total = 2; total <= n; total++) {
    int bestProduct = best[total];
    // only need to try splits up to total/2 — the rest are mirrors
    for (int part = 1; 2 * part <= total; part++) {
      final int left = Math.max(part, best[part]);
      final int right = Math.max(total - part, best[total - part]);
      bestProduct = Math.max(bestProduct, left * right);
    }
    best[total] = bestProduct;
  }
  return best[n];
}
/**
 * Returns the largest value among {@code A[0..high]} (both bounds inclusive).
 * Assumes A is non-empty and {@code high < A.length}.
 */
private long getMax(long[] A, int high) {
  long largest = A[0];
  int i = 0;
  while (++i <= high) {
    if (A[i] > largest) {
      largest = A[i];
    }
  }
  return largest;
}
/**
 * Exercises the specified function definition with too few, too many, and
 * deliberately mistyped arguments, passing each generated query to
 * {@code error(...)} together with the errors expected for it: argument counts
 * inside [def.min, def.max] are given wrongly typed values and must raise a
 * type (or missing-database) error; counts outside the range must raise an
 * arity error.
 *
 * @param def function definition to check
 */
protected void check(final Function def) {
  final String desc = def.toString();
  // strip the parenthesized signature from the string form to get the bare name
  final String name = desc.replaceAll("\\(.*", "");
  // try every argument count from one below the minimum to one above the maximum
  for (int al = Math.max(def.min - 1, 0); al <= def.max + 1; al++) {
    // true if this argument count is accepted by the signature
    final boolean in = al >= def.min && al <= def.max;
    final StringBuilder qu = new StringBuilder(name + "(");
    // counts how many of the supplied arguments are acceptable to the declared
    // type anyway (used below to skip queries that would not fail)
    int any = 0;
    for (int a = 0; a < al; a++) {
      if (a != 0) qu.append(", ");
      if (in) {
        // legal arity: supply a value of the wrong type — a number where a
        // string is declared, a string otherwise
        if (def.args[a].type == AtomType.STR) {
          qu.append("1");
        } else {
          // any type (skip test)
          qu.append("'X'");
          if (SeqType.STR.instance(def.args[a])) any++;
        }
      } else {
        // test wrong number of arguments
        qu.append("'x'");
      }
    }
    // skip test if all types are arbitrary
    if ((def.min > 0 || al != 0) && (any == 0 || any != al)) {
      final String query = qu.append(")").toString();
      if (in) error(query, Err.XPTYPE, Err.NODBCTX, Err.NODB);
      else error(query, Err.XPARGS);
    }
  }
}
/**
 * Verifies {@code BTreeKeySerializer.BASIC.findChildren} against a directory
 * node holding the keys 0, 10, ..., 90, for every combination of the left and
 * right edge flags, probing lookup keys both inside and outside the key range
 * (-10 .. 109).
 */
@Test
public void test_find_children_2() {
  for (boolean left : new boolean[] {true, false}) {
    for (boolean right : new boolean[] {true, false}) {
      List keys = new ArrayList();
      for (int i = 0; i < 100; i += 10) {
        keys.add(i);
      }
      // one child slot per key, plus an extra slot for each active edge flag
      int[] child = new int[keys.size() + (right ? 1 : 0) + (left ? 1 : 0)];
      Arrays.fill(child, 11);
      // the rightmost edge is marked by a zero child pointer
      if (right) child[child.length - 1] = 0;
      BTreeMap.BNode n = new BTreeMap.DirNode(keys.toArray(), left, right, false, mkchild(child));
      for (int i = -10; i < 110; i++) {
        int pos = BTreeKeySerializer.BASIC.findChildren(n, i);
        // expected slot: ceiling-style division by the key step (10), shifted
        // by one when a left edge slot is present, then clamped to the valid
        // slot range for this flag combination
        int expected = (i + (left ? 19 : 9)) / 10;
        expected = Math.max(left ? 1 : 0, expected);
        expected = Math.min(left ? 11 : 10, expected);
        assertEquals("i:" + i + " - l:" + left + " - r:" + right, expected, pos);
      }
    }
  }
}
/**
 * Builds an array of random length (less than MAX_ARRAY_SIZE) filled with
 * random integers in non-decreasing order: each slot stores the running
 * maximum of the random values drawn so far.
 */
private int[] generateRandomSortedArray() {
  final int length = (int) (Math.random() * MAX_ARRAY_SIZE);
  final int[] sorted = new int[length];
  int runningMax = Integer.MIN_VALUE;
  for (int idx = 0; idx < length; idx++) {
    final int candidate = generateRandomInteger();
    if (candidate > runningMax) {
      runningMax = candidate;
    }
    sorted[idx] = runningMax;
  }
  return sorted;
}
/**
 * Verifies that makeMove does not succeed twice in a row when the earlier
 * move has not been cancelled: after an initial move, the results of two
 * consecutive makeMove calls for the same found move must be equal.
 */
@Test
public void makeMoveDoesntWorkIfEarlierNotCancelled() {
  Move move = round.findMoves().get(0);
  // NOTE(review): Math.max(size - 1, move.getY()) never clamps y below
  // size - 1 — if the intent was to keep y on the board, Math.min may have
  // been meant; confirm against the board semantics
  int y = Math.max(size - 1, move.getY());
  round.makeMove(y, 0);
  boolean test = round.makeMove(move.getY(), move.getX());
  // makeMove should not return true twice in a row
  assertEquals(true, round.makeMove(move.getY2(), move.getX2()) == test);
}
/**
 * Utility to test blob POST, GET, HEAD and DELETE operations for a specified
 * size: posts a blob (regular or multipart), verifies full and ranged GET and
 * HEAD, not-modified GET, user metadata and blob info, then deletes the blob
 * and verifies post-delete behavior.
 *
 * @param contentSize the size of the blob to be tested
 * @param multipartPost {@code true} if multipart POST is desired, {@code false} otherwise.
 * @throws Exception if any of the operations or verifications fail
 */
private void doPostGetHeadDeleteTest(int contentSize, boolean multipartPost) throws Exception {
  ByteBuffer content = ByteBuffer.wrap(RestTestUtils.getRandomBytes(contentSize));
  String serviceId = "postGetHeadDeleteServiceID";
  String contentType = "application/octet-stream";
  String ownerId = "postGetHeadDeleteOwnerID";
  HttpHeaders headers = new DefaultHttpHeaders();
  // 7200 is presumably the blob TTL in seconds — confirm against setAmbryHeaders
  setAmbryHeaders(headers, content.capacity(), 7200, false, serviceId, contentType, ownerId);
  headers.set(HttpHeaders.Names.CONTENT_LENGTH, content.capacity());
  String blobId;
  byte[] usermetadata = null;
  if (multipartPost) {
    usermetadata = UtilsTest.getRandomString(32).getBytes();
    blobId = multipartPostBlobAndVerify(headers, content, ByteBuffer.wrap(usermetadata));
  } else {
    // regular POST carries user metadata as prefixed headers instead
    headers.add(RestUtils.Headers.USER_META_DATA_HEADER_PREFIX + "key1", "value1");
    headers.add(RestUtils.Headers.USER_META_DATA_HEADER_PREFIX + "key2", "value2");
    blobId = postBlobAndVerify(headers, content);
  }
  // full-blob GET and HEAD
  getBlobAndVerify(blobId, null, headers, content);
  getHeadAndVerify(blobId, null, headers);
  // suffix range: last N bytes, N in [0, capacity]
  ByteRange range = ByteRange.fromLastNBytes(ThreadLocalRandom.current().nextLong(content.capacity() + 1));
  getBlobAndVerify(blobId, range, headers, content);
  getHeadAndVerify(blobId, range, headers);
  // offset-based ranges need at least one byte of content
  if (contentSize > 0) {
    range = ByteRange.fromStartOffset(ThreadLocalRandom.current().nextLong(content.capacity()));
    getBlobAndVerify(blobId, range, headers, content);
    getHeadAndVerify(blobId, range, headers);
    // closed range between two random in-bounds offsets
    long random1 = ThreadLocalRandom.current().nextLong(content.capacity());
    long random2 = ThreadLocalRandom.current().nextLong(content.capacity());
    range = ByteRange.fromOffsetRange(Math.min(random1, random2), Math.max(random1, random2));
    getBlobAndVerify(blobId, range, headers, content);
    getHeadAndVerify(blobId, range, headers);
  }
  getNotModifiedBlobAndVerify(blobId);
  getUserMetadataAndVerify(blobId, headers, usermetadata);
  getBlobInfoAndVerify(blobId, headers, usermetadata);
  deleteBlobAndVerify(blobId);
  // check GET, HEAD and DELETE after delete.
  verifyOperationsAfterDelete(blobId);
}
/**
 * Single-threaded test of an ARingBuffer of capacity 100: an empty buffer
 * yields an empty iterator (and next() throws), and after each put the
 * iterator walks exactly the most recent min(i, 100) values in order.
 */
@Test
public void testSingleThreaded() {
  final ARingBuffer<Long> buffer = new ARingBuffer<Long>(Long.class, 100);
  // an empty buffer iterates over nothing
  assertFalse(buffer.iterator().hasNext());
  // next() on an empty iterator must throw
  try {
    buffer.iterator().next();
    fail("exception expected");
  } catch (Exception exc) {
    // expected
  }
  for (long value = 1; value < 2000; value++) {
    buffer.put(value);
    final Iterator<Long> it = buffer.iterator();
    // oldest retained element: 99 back from the newest, but never below 1
    long oldest = value - 99;
    if (oldest < 1) {
      oldest = 1;
    }
    for (long expected = oldest; expected <= value; expected++) {
      assertTrue(it.hasNext());
      assertEquals(Long.valueOf(expected), it.next());
    }
    assertFalse(it.hasNext());
  }
}
/**
 * Simulates a reduce step that merges overlapping session records: records in
 * out_list whose [start, end] time windows overlap a record in in_list are
 * collapsed into one record carrying the summed flux and the widest time span.
 * Results are printed, not asserted.
 *
 * <p>Record layout (semicolon separated):
 * {@code phone;code;start;end;up;down;flux;flag;ip}, where start occupies
 * character offsets 15..34 of the raw line (used by the comparators below).
 */
@org.junit.Test
public void reduceTest() throws Exception {
  List<String> out_list = new ArrayList<String>();
  List<String> in_list = new ArrayList<String>();
  out_list.add(
      "13311361915;59;2015-10-30 13:44:21;2015-10-30 13:44:26;11985;150621;159;0;10.56.0.145");
  out_list.add(
      "13311361915;59;2015-10-30 13:17:27;2015-10-30 14:11:41;872979;2599118;3391;0;10.56.0.145");
  out_list.add("13311361915;59;2015-10-30 13:16:27;2015-10-30 13:18:41;2;3;4;0;10.56.0.145");
  out_list.add(
      "13311361915;59;2015-10-30 13:38:42;2015-10-30 13:39:16;2476;2016;5;0;10.56.0.145");
  out_list.add(
      "13311361915;59;2015-10-30 13:38:22;2015-10-30 13:44:19;10825;24953;35;0;10.56.0.145");
  in_list.add(
      "13311361915;59;2015-10-30 13:44:21;2015-10-30 13:44:26;11985;150621;159;0;10.56.0.145");
  in_list.add(
      "13311361915;59;2015-10-30 13:17:27;2015-10-30 14:11:41;872979;2599118;3391;0;10.56.0.145");
  in_list.add("13311361915;59;2015-10-30 13:16:27;2015-10-30 13:18:41;2;3;4;0;10.56.0.145");
  in_list.add("13311361915;59;2015-10-30 13:38:42;2015-10-30 13:39:16;2476;2016;5;0;10.56.0.145");
  in_list.add(
      "13311361915;59;2015-10-30 13:38:22;2015-10-30 13:44:19;10825;24953;35;0;10.56.0.145");
  // sort both value lists by their start timestamp
  // NOTE(review): each compare() call builds a fresh SimpleDateFormat and
  // parses both strings — fine for 5 records, but wasteful at scale
  Collections.sort(
      out_list,
      new Comparator<String>() {
        public int compare(String o1, String o2) {
          SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
          Date date1 = null;
          Date date2 = null;
          try {
            // offsets 15..34 hold the start timestamp of the record
            date1 = sdf.parse(o1.substring(15, 34));
            date2 = sdf.parse(o2.substring(15, 34));
          } catch (ParseException e) {
            e.printStackTrace();
          }
          return date1.compareTo(date2);
        }
      });
  Collections.sort(
      in_list,
      new Comparator<String>() {
        public int compare(String o1, String o2) {
          SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
          Date date1 = null;
          Date date2 = null;
          try {
            date1 = sdf.parse(o1.substring(15, 34));
            date2 = sdf.parse(o2.substring(15, 34));
          } catch (ParseException e) {
            e.printStackTrace();
          }
          return date1.compareTo(date2);
        }
      });
  // merged output records
  Set<String> merged_set = new HashSet<String>();
  // records already absorbed into an earlier merge
  Set<String> repeat_set = new HashSet<String>();
  // date formatting / parsing
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  for (String whole_out : out_list) {
    if (!repeat_set.contains(whole_out)) {
      String[] split_out = whole_out.split(";");
      long start_out = 0L;
      long end_out = 0L;
      long flux_out = 0L;
      try {
        // fields: [2] start time, [3] end time, [6] flux
        start_out = sdf.parse(split_out[2]).getTime();
        end_out = sdf.parse(split_out[3]).getTime();
        flux_out = Long.parseLong(split_out[6]);
      } catch (ParseException e) {
        e.printStackTrace();
      }
      // total flux of the merged record
      long flux_total = flux_out;
      // start time of the merged record
      long start_merged = start_out;
      // end time of the merged record
      long end_merged = end_out;
      for (String whole_in : in_list) {
        String[] split_in = whole_in.split(";");
        long start_in = 0L;
        long end_in = 0L;
        long flux_in = 0L;
        try {
          start_in = sdf.parse(split_in[2]).getTime();
          end_in = sdf.parse(split_in[3]).getTime();
          flux_in = Long.parseLong(split_in[6]);
        } catch (ParseException e) {
          e.printStackTrace();
        }
        // skip the record itself
        if (!whole_in.equals(whole_out)) {
          // overlap test: either window starts inside the other
          if (start_merged >= start_in && start_merged <= end_in
              || start_in >= start_merged && start_in <= end_merged) {
            flux_total = flux_total + flux_in;
            start_merged = Math.min(start_in, start_merged);
            end_merged = Math.max(end_in, end_merged);
            // remember the absorbed record in repeat_set
            repeat_set.add(whole_in);
          }
        }
      }
      merged_set.add(
          split_out[0]
              + "\t"
              + sdf.format(new Date(start_merged))
              + "\t"
              + sdf.format(new Date(end_merged))
              + "\t"
              + flux_total
              + "\t"
              + split_out[8]);
    } else {
      continue;
    }
  }
  for (String record : merged_set) {
    System.out.println(record);
  }
}
/**
 * Drives {@code SynonymRecordSearcher.RecordResult.computeOutputRows} with the
 * given word-result matrix, checks the produced rows against the expected
 * cartesian product, and reports whether duplicates appeared among the
 * expected rows.
 *
 * @param wordresults one array of word identifiers per searched column; each
 *     identifier must be parseable as an int (it becomes the word score)
 * @return 1 if duplicates were found among the expected output rows, else 0
 */
private int testRecordResultCompute(String[][] wordresults) {
  int nbDuplicateFound = 0;
  // prepare wordresults: build a WordResult per identifier, per column
  List<List<WordResult>> wordResults = new ArrayList<List<WordResult>>();
  for (String[] elts : wordresults) {
    List<WordResult> wrs = new ArrayList<WordResult>();
    for (String elt : elts) {
      WordResult wr = new WordResult();
      wr.input = "input " + elt;
      wr.word = "word " + elt;
      wr.score = Integer.valueOf(elt);
      wrs.add(wr);
    }
    wordResults.add(wrs);
  }
  // --- compute output results as a list
  RecordResult recRes = new RecordResult();
  recRes.record = initializeRecordToSearch(wordresults);
  recRes.wordResults.addAll(wordResults);
  // NOTE(review): the null initialization is a dead store — the variable is
  // reassigned on the next statement
  List<OutputRecord> expectedOutputRows = null;
  expectedOutputRows = new ArrayList<OutputRecord>();
  SynonymRecordSearcher.RecordResult.computeOutputRows(
      wordresults.length, new ArrayList<WordResult>(), recRes.wordResults, expectedOutputRows);
  for (OutputRecord outputRecord : expectedOutputRows) {
    System.out.println(outputRecord);
  }
  // --- test that duplicates are removed when using a set instead of a list
  Set<OutputRecord> uniques = new HashSet<OutputRecord>();
  uniques.addAll(expectedOutputRows);
  Assert.assertTrue(uniques.size() <= expectedOutputRows.size());
  if (uniques.size() < expectedOutputRows.size()) {
    nbDuplicateFound++;
  }
  List<OutputRecord> outputRows = recRes.computeOutputRows();
  // --- check some assertions
  // verify number of results: the cartesian product of the column sizes
  // (empty columns count as 1 so they do not zero the product)
  int expectedNbOutput = 1;
  for (String[] in : wordresults) {
    expectedNbOutput *= Math.max(in.length, 1);
  }
  Assert.assertEquals(expectedNbOutput, expectedOutputRows.size());
  Assert.assertTrue(expectedOutputRows.size() >= outputRows.size());
  // every produced row must appear among the expected rows
  for (OutputRecord outputRecord : outputRows) {
    boolean found = false;
    for (OutputRecord expectedRecord : expectedOutputRows) {
      if (expectedRecord.equals(outputRecord)) {
        found = true;
        break;
      }
    }
    Assert.assertTrue("Record not found: " + outputRecord, found);
  }
  return nbDuplicateFound;
}
/**
 * Asserts that the largest per-sstable max timestamp across all sstables of
 * the given column family store equals the expected value. With no sstables,
 * the observed value is Long.MIN_VALUE.
 */
public static void assertMaxTimestamp(ColumnFamilyStore cfs, long maxTimestampExpected) {
  long observed = Long.MIN_VALUE;
  for (SSTableReader reader : cfs.getSSTables()) {
    final long ts = reader.getMaxTimestamp();
    if (ts > observed) {
      observed = ts;
    }
  }
  assertEquals(maxTimestampExpected, observed);
}