/**
 * Verifies the recorded default values of every {@code CassandraClientConfig} property.
 * Any new config property added without updating this test will fail the assertion.
 */
@Test
public void testDefaults()
{
    ConfigAssertions.assertRecordedDefaults(ConfigAssertions.recordDefaults(CassandraClientConfig.class)
            .setLimitForPartitionKeySelect(200)
            .setFetchSizeForPartitionKeySelect(20_000)
            .setMaxSchemaRefreshThreads(10)
            .setSchemaCacheTtl(new Duration(1, TimeUnit.HOURS))
            .setSchemaRefreshInterval(new Duration(2, TimeUnit.MINUTES))
            .setFetchSize(5_000)
            .setConsistencyLevel(ConsistencyLevel.ONE)
            .setContactPoints("")
            .setNativeProtocolPort(9042)
            .setPartitionSizeForBatchSelect(100)
            .setSplitSize(1_024)
            .setPartitioner("Murmur3Partitioner")
            .setThriftPort(9160)
            .setTransportFactoryOptions("")
            .setThriftConnectionFactoryClassName("org.apache.cassandra.thrift.TFramedTransportFactory")
            .setAllowDropTable(false)
            .setUsername(null)
            .setPassword(null)
            // client timeouts default to the driver-supplied constants
            .setClientReadTimeout(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS)
            .setClientConnectTimeout(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS)
            .setClientSoLinger(null)
            .setRetryPolicy(RetryPolicyType.DEFAULT));
}
/**
 * Verifies that all {@code KerberosConfig} properties default to {@code null}
 * (i.e. Kerberos is unconfigured unless explicitly set).
 */
@Test
public void testDefaults()
{
    ConfigAssertions.assertRecordedDefaults(ConfigAssertions.recordDefaults(KerberosConfig.class)
            .setConfig(null)
            .setKeytab(null)
            .setCredentialCache(null));
}
/**
 * Verifies the recorded default values of every {@code HiveClientConfig} property,
 * covering metastore, DFS, S3, and ORC reader settings.
 */
@Test
public void testDefaults()
{
    ConfigAssertions.assertRecordedDefaults(ConfigAssertions.recordDefaults(HiveClientConfig.class)
            // time zone defaults to the JVM's zone
            .setTimeZone(TimeZone.getDefault().getID())
            .setMaxSplitSize(new DataSize(64, Unit.MEGABYTE))
            .setMaxOutstandingSplits(1_000)
            .setMaxSplitIteratorThreads(1_000)
            .setAllowDropTable(false)
            .setAllowRenameTable(false)
            .setAllowCorruptWritesForTesting(false)
            // metastore caching/refresh defaults
            .setMetastoreCacheTtl(new Duration(1, TimeUnit.HOURS))
            .setMetastoreRefreshInterval(new Duration(1, TimeUnit.SECONDS))
            .setMaxMetastoreRefreshThreads(100)
            .setMetastoreSocksProxy(null)
            .setMetastoreTimeout(new Duration(10, TimeUnit.SECONDS))
            .setMinPartitionBatchSize(10)
            .setMaxPartitionBatchSize(100)
            .setMaxInitialSplits(200)
            .setMaxInitialSplitSize(new DataSize(32, Unit.MEGABYTE))
            .setForceLocalScheduling(false)
            .setRecursiveDirWalkerEnabled(false)
            // DFS connection defaults
            .setDfsTimeout(new Duration(10, TimeUnit.SECONDS))
            .setDfsConnectTimeout(new Duration(500, TimeUnit.MILLISECONDS))
            .setDfsConnectMaxRetries(5)
            .setVerifyChecksum(true)
            .setResourceConfigFiles((String) null)
            .setHiveStorageFormat(HiveStorageFormat.RCBINARY)
            .setDomainSocketPath(null)
            .setUseParquetColumnNames(false)
            // S3 client defaults
            .setS3AwsAccessKey(null)
            .setS3AwsSecretKey(null)
            .setS3UseInstanceCredentials(true)
            .setS3SslEnabled(true)
            .setS3MaxClientRetries(3)
            .setS3MaxErrorRetries(10)
            .setS3MaxBackoffTime(new Duration(10, TimeUnit.MINUTES))
            .setS3MaxRetryTime(new Duration(10, TimeUnit.MINUTES))
            .setS3ConnectTimeout(new Duration(5, TimeUnit.SECONDS))
            .setS3SocketTimeout(new Duration(5, TimeUnit.SECONDS))
            .setS3MultipartMinFileSize(new DataSize(16, Unit.MEGABYTE))
            .setS3MultipartMinPartSize(new DataSize(5, Unit.MEGABYTE))
            .setS3MaxConnections(500)
            // staging directory defaults to the JVM temp dir
            .setS3StagingDirectory(new File(StandardSystemProperty.JAVA_IO_TMPDIR.value()))
            .setOptimizedReaderEnabled(true)
            .setAssumeCanonicalPartitionKeys(false)
            // ORC reader buffer defaults
            .setOrcMaxMergeDistance(new DataSize(1, Unit.MEGABYTE))
            .setOrcMaxBufferSize(new DataSize(8, Unit.MEGABYTE))
            .setOrcStreamBufferSize(new DataSize(8, Unit.MEGABYTE)));
}
/**
 * Verifies that every {@code cassandra.*} configuration property maps to the
 * corresponding {@code CassandraClientConfig} setter, using non-default values
 * so a missed mapping cannot pass by accident.
 */
@Test
public void testExplicitPropertyMappings()
{
    // every known property, each set to a value different from its default
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("cassandra.limit-for-partition-key-select", "100")
            .put("cassandra.fetch-size-for-partition-key-select", "500")
            .put("cassandra.max-schema-refresh-threads", "2")
            .put("cassandra.schema-cache-ttl", "2h")
            .put("cassandra.schema-refresh-interval", "30m")
            .put("cassandra.contact-points", "host1,host2")
            .put("cassandra.native-protocol-port", "9999")
            .put("cassandra.fetch-size", "10000")
            .put("cassandra.consistency-level", "TWO")
            .put("cassandra.partition-size-for-batch-select", "77")
            .put("cassandra.split-size", "1025")
            .put("cassandra.thrift-port", "9161")
            .put("cassandra.partitioner", "RandomPartitioner")
            .put("cassandra.transport-factory-options", "a=b")
            .put("cassandra.thrift-connection-factory-class", "org.apache.cassandra.thrift.TFramedTransportFactory1")
            .put("cassandra.allow-drop-table", "true")
            .put("cassandra.username", "my_username")
            .put("cassandra.password", "my_password")
            .put("cassandra.client.read-timeout", "11")
            .put("cassandra.client.connect-timeout", "22")
            .put("cassandra.client.so-linger", "33")
            .put("cassandra.retry-policy", "BACKOFF")
            .build();

    // config object populated with the values those properties should produce
    CassandraClientConfig expected = new CassandraClientConfig()
            .setLimitForPartitionKeySelect(100)
            .setFetchSizeForPartitionKeySelect(500)
            .setMaxSchemaRefreshThreads(2)
            .setSchemaCacheTtl(new Duration(2, TimeUnit.HOURS))
            .setSchemaRefreshInterval(new Duration(30, TimeUnit.MINUTES))
            .setContactPoints("host1", "host2")
            .setNativeProtocolPort(9999)
            .setFetchSize(10_000)
            .setConsistencyLevel(ConsistencyLevel.TWO)
            .setPartitionSizeForBatchSelect(77)
            .setSplitSize(1_025)
            .setThriftPort(9161)
            .setPartitioner("RandomPartitioner")
            .setTransportFactoryOptions("a=b")
            .setThriftConnectionFactoryClassName("org.apache.cassandra.thrift.TFramedTransportFactory1")
            .setAllowDropTable(true)
            .setUsername("my_username")
            .setPassword("my_password")
            .setClientReadTimeout(11)
            .setClientConnectTimeout(22)
            .setClientSoLinger(33)
            .setRetryPolicy(RetryPolicyType.BACKOFF);

    ConfigAssertions.assertFullMapping(properties, expected);
}
/**
 * Verifies that the single {@code http-client.threads} property maps to
 * {@code AsyncHttpClientConfig.setWorkerThreads}.
 */
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("http-client.threads", "33")
            .build();

    AsyncHttpClientConfig expected = new AsyncHttpClientConfig()
            .setWorkerThreads(33);

    ConfigAssertions.assertFullMapping(properties, expected);
}
/**
 * Verifies the recorded default values of every {@code VerifierConfig} property.
 */
@Test
public void testDefaults()
{
    ConfigAssertions.assertRecordedDefaults(ConfigAssertions.recordDefaults(VerifierConfig.class)
            .setTestUsername("verifier-test")
            .setControlUsername("verifier-test")
            .setTestPassword(null)
            .setControlPassword(null)
            .setSuite(null)
            .setSuites(null)
            .setSource(null)
            // run id defaults to today's date in yyyy-MM-dd form
            .setRunId(new DateTime().toString("yyyy-MM-dd"))
            .setEventClients("human-readable")
            .setThreadCount(10)
            .setQueryDatabase(null)
            .setControlGateway(null)
            .setTestGateway(null)
            .setControlTimeout(new Duration(10, TimeUnit.MINUTES))
            .setTestTimeout(new Duration(1, TimeUnit.HOURS))
            .setBlacklist("")
            .setWhitelist("")
            .setMaxRowCount(10_000)
            .setMaxQueries(1_000_000)
            .setAlwaysReport(false)
            .setSuiteRepetitions(1)
            .setCheckCorrectnessEnabled(true)
            .setExplainOnly(false)
            // default regex matches only the empty string, i.e. skip nothing
            .setSkipCorrectnessRegex("^$")
            .setQueryRepetitions(1)
            .setTestCatalogOverride(null)
            .setTestSchemaOverride(null)
            .setControlCatalogOverride(null)
            .setControlSchemaOverride(null)
            .setQuiet(false)
            .setVerboseResultsComparison(false)
            .setEventLogFile(null)
            .setAdditionalJdbcDriverPath(null)
            .setTestJdbcDriverName(null)
            .setControlJdbcDriverName(null));
}
/**
 * Verifies that the {@code http.authentication.krb5.*} properties map to the
 * corresponding {@code KerberosConfig} file-path setters.
 */
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("http.authentication.krb5.config", "/etc/krb5.conf")
            .put("http.authentication.krb5.keytab", "/etc/krb5.keytab")
            .put("http.authentication.krb5.credential-cache", "/etc/krb5.ccache")
            .build();

    KerberosConfig expected = new KerberosConfig()
            .setConfig(new File("/etc/krb5.conf"))
            .setKeytab(new File("/etc/krb5.keytab"))
            .setCredentialCache(new File("/etc/krb5.ccache"));

    ConfigAssertions.assertFullMapping(properties, expected);
}
/**
 * Verifies that every verifier configuration property maps to the corresponding
 * {@code VerifierConfig} setter, using non-default values so a missed mapping
 * cannot pass by accident.
 *
 * <p>Fix: removed the leading {@code .setTestUsername("verifier-test")} call on
 * the expected object — it was dead code, immediately overridden by
 * {@code .setTestUsername("test_user")} later in the same chain, and its value
 * did not correspond to any entry in {@code properties}.
 */
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("suites", "my_suite")
            .put("suite", "my_suite")
            .put("source", "my_source")
            .put("run-id", "my_run_id")
            .put("event-client", "file,human-readable")
            .put("thread-count", "1")
            .put("blacklist", "1,2")
            .put("whitelist", "3,4")
            .put("verbose-results-comparison", "true")
            .put("max-row-count", "1")
            .put("max-queries", "1")
            .put("always-report", "true")
            .put("suite-repetitions", "2")
            .put("query-repetitions", "2")
            .put("check-correctness", "false")
            .put("explain-only", "true")
            .put("skip-correctness-regex", "limit")
            .put("quiet", "true")
            .put("event-log-file", "./test")
            .put("query-database", "jdbc:mysql://localhost:3306/my_database?user=my_username&password=my_password")
            .put("test.username", "test_user")
            .put("test.password", "test_password")
            .put("test.gateway", "jdbc:presto://localhost:8080")
            .put("test.timeout", "1s")
            .put("test.catalog-override", "my_catalog")
            .put("test.schema-override", "my_schema")
            .put("control.username", "control_user")
            .put("control.password", "control_password")
            .put("control.gateway", "jdbc:presto://localhost:8081")
            .put("control.timeout", "1s")
            .put("control.catalog-override", "my_catalog")
            .put("control.schema-override", "my_schema")
            .put("additional-jdbc-driver-path", "/test/path")
            .put("test.jdbc-driver-class", "com.facebook.exampleclass")
            .put("control.jdbc-driver-class", "com.facebook.exampleclass")
            .build();

    VerifierConfig expected = new VerifierConfig()
            .setSuites("my_suite")
            .setSuite("my_suite")
            .setSource("my_source")
            .setRunId("my_run_id")
            .setEventClients("file,human-readable")
            .setThreadCount(1)
            .setBlacklist("1,2")
            .setWhitelist("3,4")
            .setMaxRowCount(1)
            .setMaxQueries(1)
            .setAlwaysReport(true)
            .setVerboseResultsComparison(true)
            .setSuiteRepetitions(2)
            .setQueryRepetitions(2)
            .setCheckCorrectnessEnabled(false)
            .setExplainOnly(true)
            .setSkipCorrectnessRegex("limit")
            .setQuiet(true)
            .setEventLogFile("./test")
            .setQueryDatabase("jdbc:mysql://localhost:3306/my_database?user=my_username&password=my_password")
            .setTestUsername("test_user")
            .setTestPassword("test_password")
            .setTestGateway("jdbc:presto://localhost:8080")
            .setTestTimeout(new Duration(1, TimeUnit.SECONDS))
            .setTestCatalogOverride("my_catalog")
            .setTestSchemaOverride("my_schema")
            .setControlUsername("control_user")
            .setControlPassword("control_password")
            .setControlGateway("jdbc:presto://localhost:8081")
            .setControlTimeout(new Duration(1, TimeUnit.SECONDS))
            .setControlCatalogOverride("my_catalog")
            .setControlSchemaOverride("my_schema")
            .setAdditionalJdbcDriverPath("/test/path")
            .setTestJdbcDriverName("com.facebook.exampleclass")
            .setControlJdbcDriverName("com.facebook.exampleclass");

    ConfigAssertions.assertFullMapping(properties, expected);
}
/**
 * Verifies that {@code AsyncHttpClientConfig} defaults to 16 worker threads.
 */
@Test
public void testDefaults()
{
    ConfigAssertions.assertRecordedDefaults(ConfigAssertions.recordDefaults(AsyncHttpClientConfig.class)
            .setWorkerThreads(16));
}
@Test public void testExplicitPropertyMappings() { Map<String, String> properties = new ImmutableMap.Builder<String, String>() .put("hive.time-zone", nonDefaultTimeZone().getID()) .put("hive.max-split-size", "256MB") .put("hive.max-outstanding-splits", "10") .put("hive.max-split-iterator-threads", "10") .put("hive.allow-drop-table", "true") .put("hive.allow-rename-table", "true") .put("hive.allow-corrupt-writes-for-testing", "true") .put("hive.metastore-cache-ttl", "2h") .put("hive.metastore-refresh-interval", "30m") .put("hive.metastore-refresh-max-threads", "2500") .put("hive.metastore.thrift.client.socks-proxy", "localhost:1080") .put("hive.metastore-timeout", "20s") .put("hive.metastore.partition-batch-size.min", "1") .put("hive.metastore.partition-batch-size.max", "1000") .put("hive.dfs-timeout", "33s") .put("hive.dfs.connect.timeout", "20s") .put("hive.dfs.connect.max-retries", "10") .put("hive.dfs.verify-checksum", "false") .put("hive.config.resources", "/foo.xml,/bar.xml") .put("hive.max-initial-splits", "10") .put("hive.max-initial-split-size", "16MB") .put("hive.recursive-directories", "true") .put("hive.storage-format", "SEQUENCEFILE") .put("hive.force-local-scheduling", "true") .put("hive.assume-canonical-partition-keys", "true") .put("dfs.domain-socket-path", "/foo") .put("hive.parquet.use-column-names", "true") .put("hive.s3.aws-access-key", "abc123") .put("hive.s3.aws-secret-key", "secret") .put("hive.s3.use-instance-credentials", "false") .put("hive.s3.ssl.enabled", "false") .put("hive.s3.max-client-retries", "9") .put("hive.s3.max-error-retries", "8") .put("hive.s3.max-backoff-time", "4m") .put("hive.s3.max-retry-time", "20m") .put("hive.s3.connect-timeout", "8s") .put("hive.s3.socket-timeout", "4m") .put("hive.s3.multipart.min-file-size", "32MB") .put("hive.s3.multipart.min-part-size", "15MB") .put("hive.s3.max-connections", "77") .put("hive.s3.staging-directory", "/s3-staging") .put("hive.optimized-reader.enabled", "false") 
.put("hive.orc.max-merge-distance", "22kB") .put("hive.orc.max-buffer-size", "44kB") .put("hive.orc.stream-buffer-size", "55kB") .build(); HiveClientConfig expected = new HiveClientConfig() .setTimeZone(nonDefaultTimeZone().toTimeZone()) .setMaxSplitSize(new DataSize(256, Unit.MEGABYTE)) .setMaxOutstandingSplits(10) .setMaxSplitIteratorThreads(10) .setAllowDropTable(true) .setAllowRenameTable(true) .setAllowCorruptWritesForTesting(true) .setMetastoreCacheTtl(new Duration(2, TimeUnit.HOURS)) .setMetastoreRefreshInterval(new Duration(30, TimeUnit.MINUTES)) .setMaxMetastoreRefreshThreads(2500) .setMetastoreSocksProxy(HostAndPort.fromParts("localhost", 1080)) .setMetastoreTimeout(new Duration(20, TimeUnit.SECONDS)) .setMinPartitionBatchSize(1) .setMaxPartitionBatchSize(1000) .setMaxInitialSplits(10) .setMaxInitialSplitSize(new DataSize(16, Unit.MEGABYTE)) .setForceLocalScheduling(true) .setRecursiveDirWalkerEnabled(true) .setDfsTimeout(new Duration(33, TimeUnit.SECONDS)) .setDfsConnectTimeout(new Duration(20, TimeUnit.SECONDS)) .setDfsConnectMaxRetries(10) .setVerifyChecksum(false) .setResourceConfigFiles(ImmutableList.of("/foo.xml", "/bar.xml")) .setHiveStorageFormat(HiveStorageFormat.SEQUENCEFILE) .setDomainSocketPath("/foo") .setUseParquetColumnNames(true) .setS3AwsAccessKey("abc123") .setS3AwsSecretKey("secret") .setS3UseInstanceCredentials(false) .setS3SslEnabled(false) .setS3MaxClientRetries(9) .setS3MaxErrorRetries(8) .setS3MaxBackoffTime(new Duration(4, TimeUnit.MINUTES)) .setS3MaxRetryTime(new Duration(20, TimeUnit.MINUTES)) .setS3ConnectTimeout(new Duration(8, TimeUnit.SECONDS)) .setS3SocketTimeout(new Duration(4, TimeUnit.MINUTES)) .setS3MultipartMinFileSize(new DataSize(32, Unit.MEGABYTE)) .setS3MultipartMinPartSize(new DataSize(15, Unit.MEGABYTE)) .setS3MaxConnections(77) .setS3StagingDirectory(new File("/s3-staging")) .setOptimizedReaderEnabled(false) .setAssumeCanonicalPartitionKeys(true) .setOrcMaxMergeDistance(new DataSize(22, Unit.KILOBYTE)) 
.setOrcMaxBufferSize(new DataSize(44, Unit.KILOBYTE)) .setOrcStreamBufferSize(new DataSize(55, Unit.KILOBYTE)); ConfigAssertions.assertFullMapping(properties, expected); }