@Test
public void testDeleteMissing() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();

  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    // Add an extra directory under targetBase that has no counterpart under sourceBase;
    // delete-missing is expected to remove it during commit.
    String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    fs.rename(new Path(targetBaseAdd), new Path(targetBase));

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.setSyncFolder(true);
    options.setDeleteMissing(true);
    options.appendToConf(conf);

    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
      Assert.fail("Source and target folders are not in sync");
    }

    // Test for idempotent commit
    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
  } catch (Throwable e) {
    LOG.error("Exception encountered while testing for delete missing", e);
    Assert.fail("Delete missing failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
  }
}
@After
public void cleanupMetaFolder() {
  Path meta = new Path("/meta");
  try {
    if (cluster.getFileSystem().exists(meta)) {
      cluster.getFileSystem().delete(meta, true);
      Assert.fail("Expected meta folder to be deleted");
    }
  } catch (IOException e) {
    LOG.error("Exception encountered while cleaning up folder", e);
    Assert.fail("Unable to clean up meta folder");
  }
}
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();

  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);   // 0777
    FsPermission initialPerm = new FsPermission((short) 448);  // 0700
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);

    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permissions don't match");
    }

    // Test for idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permissions don't match");
    }
  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
  }
}
private void assertTableContent(final JdbcContentPersistenceService tested, final String sysContentType,
    final String id, final Date expectedUpdated) {
  final String tablename = tested.getTableName(sysContentType);
  try (final Connection conn = this.getTested().searchiskoDs.getConnection();
      final PreparedStatement statement = conn.prepareStatement(
          String.format("select sys_content_type, updated from %s where id = ?", tablename))) {
    statement.setString(1, id);
    try (final ResultSet rs = statement.executeQuery()) {
      Assert.assertTrue(rs.next());
      Assert.assertEquals(sysContentType, rs.getString(1));
      Timestamp actualTimestamp = rs.getTimestamp(2);
      if (expectedUpdated != null) {
        Assert.assertEquals(new Timestamp(expectedUpdated.getTime()), actualTimestamp);
      } else {
        Assert.assertNotNull(actualTimestamp);
      }
    }
  } catch (SQLException e) {
    Assert.fail(e.getMessage());
  }
}
private void checkConsistency() {
  ConsistencyReport report =
      new ConsistencyChecker((LocalObjectContainer) container()).checkSlotConsistency();
  if (!report.consistent()) {
    Assert.fail(report.toString());
  }
}
/**
 * Directly create a table in the database. This can be used to simulate a second EAP node
 * creating a new table.
 *
 * @see JdbcContentPersistenceService#ensureTableExists
 * @param tableName table name to create
 */
protected void createTable(String tableName) {
  try (final Connection conn = this.getTested().searchiskoDs.getConnection();
      final PreparedStatement statement = conn.prepareStatement("create table " + tableName)) {
    statement.execute();
    conn.commit();
  } catch (SQLException e) {
    Assert.fail(e.getMessage());
  }
}
protected void waitForDelete(String tableName) throws Exception {
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    if (!admin.tableExists(tableName)) {
      return;
    }
    Thread.sleep(WAIT_INTERVAL);
  }
  Assert.fail(getMethodName() + " failed");
}
@Before
public void createMetaFolder() {
  config.set(DistCpConstants.CONF_LABEL_META_FOLDER, "/meta");
  Path meta = new Path("/meta");
  try {
    cluster.getFileSystem().mkdirs(meta);
  } catch (IOException e) {
    LOG.error("Exception encountered while creating meta folder", e);
    Assert.fail("Unable to create meta folder");
  }
}
@Test
public void testAtomicCommitExistingFinal() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();

  String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
  String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    fs.mkdirs(new Path(workPath));
    fs.mkdirs(new Path(finalPath));

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);

    Assert.assertTrue(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));

    try {
      committer.commitJob(jobContext);
      Assert.fail("Should not be able to atomic-commit to pre-existing path.");
    } catch (Exception exception) {
      Assert.assertTrue(fs.exists(new Path(workPath)));
      Assert.assertTrue(fs.exists(new Path(finalPath)));
      LOG.info("Atomic-commit Test pass.");
    }
  } catch (IOException e) {
    LOG.error("Exception encountered while testing for atomic commit.", e);
    Assert.fail("Atomic commit failure");
  } finally {
    TestDistCpUtils.delete(fs, workPath);
    TestDistCpUtils.delete(fs, finalPath);
  }
}
/**
 * Drop all tables found in JdbcContentPersistenceService.TABLES_EXISTS map after each test and
 * also clear this map itself.
 */
@After
public void clearDatabase() {
  try (final Connection conn = this.getTested().searchiskoDs.getConnection()) {
    Set<String> tables = JdbcContentPersistenceService.TABLES_EXISTS.keySet();
    for (String table : tables) {
      conn.prepareStatement("drop table " + table).execute();
    }
    conn.commit();
    JdbcContentPersistenceService.TABLES_EXISTS.clear();
  } catch (SQLException e) {
    Assert.fail(e.getMessage());
  }
}
private void assertRowCount(JdbcContentPersistenceService tested, String sysContentType, int expectedCount) {
  final String tablename = tested.getTableName(sysContentType);
  int result = 0;
  try (final Connection conn = this.getTested().searchiskoDs.getConnection();
      final PreparedStatement statement =
          conn.prepareStatement(String.format("select count(*) from %s", tablename));
      final ResultSet rs = statement.executeQuery()) {
    while (rs.next()) {
      result = rs.getInt(1);
    }
    Assert.assertEquals(expectedCount, result);
  } catch (SQLException e) {
    Assert.fail(e.getMessage());
  }
}
@Test
public void testNoCommitAction() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    committer.commitJob(jobContext);
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());

    // Test for idempotent commit
    committer.commitJob(jobContext);
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Commit failed");
  }
}
protected void waitForMoving(HRegionInfo hRegionInfo, ServerName serverName) throws Exception {
  Map<byte[], HServerLoad.RegionLoad> regionsLoad = null;
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    HServerLoad load = admin.getClusterStatus().getLoad(serverName);
    regionsLoad = load.getRegionsLoad();
    for (byte[] regionName : regionsLoad.keySet()) {
      if (Arrays.equals(regionName, hRegionInfo.getRegionName())) return;
    }
    admin.move(hRegionInfo.getEncodedNameAsBytes(), serverName.getServerName().getBytes());
    Thread.sleep(WAIT_INTERVAL);
  }

  System.out.println("hRegionInfo = " + Bytes.toString(hRegionInfo.getRegionName()));
  for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : regionsLoad.entrySet()) {
    System.out.println("regionsLoad = " + Bytes.toString(entry.getKey()) + " - " + entry.getValue());
  }
  Assert.fail(Util.getMethodName() + " failed");
}
@Test
public void testRemoveAllWithPredicateInterrupted() {
  addFromArray(this.set,
      newArray(this.k0, this.k1, this.k2, this.k3, this.k4, this.k5, this.k6, this.k7, this.k8));

  final RuntimeException t = new RuntimeException();
  try {
    // The assert below should never be triggered because of the exception, so pass an
    // impossible expected value (initial size + 1) in case removeAll does terminate.
    Assert.assertEquals(10, this.set.removeAll(new KTypePredicate<KType>() {
      @Override
      public boolean apply(final KType v) {
        if (v == AbstractKTypeHashSetTest.this.key7) {
          throw t;
        }
        return v == AbstractKTypeHashSetTest.this.key2
            || v == AbstractKTypeHashSetTest.this.key9
            || v == AbstractKTypeHashSetTest.this.key5;
      }
    }));
    Assert.fail();
  } catch (final RuntimeException e) {
    // Make sure it's really our exception...
    if (e != t) {
      throw e;
    }
  }

  // And check if the set is in a consistent state. We cannot predict the removal pattern,
  // but we know that since key7 throws an exception, key7 is still present in the set.
  Assert.assertTrue(this.set.contains(this.key7));
  checkConsistency();
}
@Ignore // don't care for any reordering
@Test(timeout = 10000)
public void flatMapRangeAsyncLoop() {
  for (int i = 0; i < 2000; i++) {
    if (i % 10 == 0) {
      System.out.println("flatMapRangeAsyncLoop > " + i);
    }
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    Observable.range(0, 1000)
        .flatMap(new Func1<Integer, Observable<Integer>>() {
          @Override
          public Observable<Integer> call(Integer t) {
            return Observable.just(t);
          }
        })
        .observeOn(Schedulers.computation())
        .subscribe(ts);

    ts.awaitTerminalEvent(2500, TimeUnit.MILLISECONDS);
    if (ts.getOnCompletedEvents().isEmpty()) {
      System.out.println(ts.getOnNextEvents().size());
    }
    ts.assertTerminalEvent();
    ts.assertNoErrors();
    List<Integer> list = ts.getOnNextEvents();
    assertEquals(1000, list.size());

    boolean f = false;
    for (int j = 0; j < list.size(); j++) {
      if (list.get(j) != j) {
        System.out.println(j + " " + list.get(j));
        f = true;
      }
    }
    if (f) {
      Assert.fail("Results are out of order!");
    }
  }
}
/**
 * Validate that BO instances can be created only once with the same primary key and that they
 * can be queried either via findWithPrimaryKey() or with the help of data filters.
 */
@Test
public void CreateOrdersCheck() {
  DeployedModelDescription model =
      sf.getQueryService().getModels(DeployedModelQuery.findActiveForId(MODEL_NAME2)).get(0);
  createOrder(model, 666);
  try {
    createOrder(model, 666);
    Assert.fail("Expected BPMRT03825 error message");
  } catch (ObjectExistsException ex) {
    Assert.assertEquals("Error code", "BPMRT03825", ex.getError().getId());
  }

  String businessObjectQualifiedId = new QName(model.getId(), "Order").toString();
  BusinessObjectQuery query =
      BusinessObjectQuery.findWithPrimaryKey(businessObjectQualifiedId, 666);
  query.setPolicy(new BusinessObjectQuery.Policy(BusinessObjectQuery.Option.WITH_VALUES));
  BusinessObjects bos = sf.getQueryService().getAllBusinessObjects(query);
  Assert.assertEquals("Objects", 1, bos.getSize());
  BusinessObject bo = bos.get(0);
  List<Value> values = bo.getValues();
  Assert.assertEquals("Values", 1, values.size());
  checkValue(values, true, "customerId", 666);

  query = BusinessObjectQuery.findForBusinessObject(businessObjectQualifiedId);
  query.getFilter().addOrTerm()
      .or(DataFilter.isEqual("Order", "customerId", 2))
      .or(DataFilter.isEqual("Order", "customerId", 4));
  query.setPolicy(new BusinessObjectQuery.Policy(BusinessObjectQuery.Option.WITH_VALUES));
  bos = sf.getQueryService().getAllBusinessObjects(query);
  Assert.assertEquals("Objects", 1, bos.getSize());
  bo = bos.get(0);
  values = bo.getValues();
  Assert.assertEquals("Values", 2, values.size());
  checkValue(values, true, "customerId", 2, 4);
}
private static void unexpected(Object element) {
  Assert.fail("Unexpected element: " + element);
}