/**
 * Exercises {@code Shell.setInstance} for the ZooKeeper-instance path.
 *
 * @param dashZ true to simulate the combined {@code -z <name> <hosts>} option, false to
 *        simulate the separate instance-name and hosts options
 */
private void testSetInstance_ZKInstance(boolean dashZ) throws Exception {
  ClientConfiguration clientConf = createMock(ClientConfiguration.class);
  ShellOptionsJC opts = createMock(ShellOptionsJC.class);

  // Common option expectations: real (non-fake) instance, not resolved via HDFS.
  expect(opts.isFake()).andReturn(false);
  expect(opts.getClientConfiguration()).andReturn(clientConf);
  expect(opts.isHdfsZooInstance()).andReturn(false);

  if (dashZ) {
    // -z supplies the instance name and hosts together as a two-element list.
    expect(clientConf.withInstance("foo")).andReturn(clientConf);
    expect(clientConf.withZkHosts("host1,host2")).andReturn(clientConf);
    List<String> dashZArgs = new java.util.ArrayList<String>();
    dashZArgs.add("foo");
    dashZArgs.add("host1,host2");
    expect(opts.getZooKeeperInstance()).andReturn(dashZArgs);
    expectLastCall().anyTimes();
  } else {
    // Separate options supply the instance name and the hosts individually.
    expect(clientConf.withInstance("bar")).andReturn(clientConf);
    expect(clientConf.withZkHosts("host3,host4")).andReturn(clientConf);
    expect(opts.getZooKeeperInstance()).andReturn(Collections.<String>emptyList());
    expect(opts.getZooKeeperInstanceName()).andReturn("bar");
    expect(opts.getZooKeeperHosts()).andReturn("host3,host4");
  }

  // Finished recording; switch both mocks to replay mode.
  replay(clientConf);
  replay(opts);

  // The shell must construct exactly one ZooKeeperInstance from the prepared config.
  ZooKeeperInstance theInstance = createMock(ZooKeeperInstance.class);
  expectNew(ZooKeeperInstance.class, clientConf).andReturn(theInstance);
  replay(theInstance, ZooKeeperInstance.class);

  shell.setInstance(opts);

  verify(theInstance, ZooKeeperInstance.class);
}
/** The run method which sets the configuration and starts the MapReduce job */ public int run(String[] args) throws Exception { if (USE_MINI_ACCUMULO) { Connector connector = LocalEnvUtil.getConnector(userPass); userName = "******"; instanceName = connector.getInstance().getInstanceName(); zookeepers = connector.getInstance().getZooKeepers(); } // Create and initialize a MapReduce Job Job job = Job.getInstance(getConf(), "tweetIndexer"); job.setJarByClass(IndexedDocIndexer.class); // Set the AccumuloInputFormat so the mapper can read from Accumulo AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass)); AccumuloInputFormat.setInputTableName(job, twitterDataTable); AccumuloInputFormat.setScanAuthorizations(job, new Authorizations()); ClientConfiguration clientConfig = new ClientConfiguration(); clientConfig.withInstance(instanceName); clientConfig.withZkHosts(zookeepers); AccumuloInputFormat.setZooKeeperInstance(job, clientConfig); AccumuloOutputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass)); AccumuloOutputFormat.setCreateTables(job, createTables); AccumuloOutputFormat.setDefaultTableName(job, tweetDocIndex); AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig); // Set the map and reduce classes job.setMapperClass(TweetMapper.class); job.setReducerClass(TweetReducer.class); // Set the output key and value class for the mapper job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(Text.class); // Set the output key and value class for the reducer job.setOutputKeyClass(Text.class); job.setOutputValueClass(Mutation.class); // Set the InputFormat and OutputFormat for the job job.setInputFormatClass(AccumuloInputFormat.class); job.setOutputFormatClass(AccumuloOutputFormat.class); // Run the MapReduce job and return 0 for success, 1 otherwise return job.waitForCompletion(true) ? 0 : 1; }
/**
 * Exercises {@code Shell.setInstance} for the HDFS-resolved ZooKeeper-instance path.
 *
 * <p>NOTE(review): the record/replay ordering here is deliberate — {@code opts} is replayed
 * before further {@code clientConf} expectations are recorded, and the static mocks for
 * {@code ConfigSanityCheck} and {@code ZooUtil} are replayed at specific points. Do not
 * reorder these statements.
 *
 * @param explicitHdfs true when the --hdfsZooInstance option is given explicitly
 * @param onlyInstance true when only an instance name (no hosts) is supplied via options
 * @param onlyHosts true when only ZooKeeper hosts (no instance name) are supplied via options
 */
private void testSetInstance_HdfsZooInstance(
    boolean explicitHdfs, boolean onlyInstance, boolean onlyHosts) throws Exception {
  ClientConfiguration clientConf = createMock(ClientConfiguration.class);
  ShellOptionsJC opts = createMock(ShellOptionsJC.class);

  // Common option expectations: real (non-fake) instance.
  expect(opts.isFake()).andReturn(false);
  expect(opts.getClientConfiguration()).andReturn(clientConf);
  expect(opts.isHdfsZooInstance()).andReturn(explicitHdfs);

  if (!explicitHdfs) {
    // No -z option; instance name and/or hosts may come from the individual options.
    expect(opts.getZooKeeperInstance()).andReturn(Collections.<String>emptyList());
    if (onlyInstance) {
      expect(opts.getZooKeeperInstanceName()).andReturn("instance");
      expect(clientConf.withInstance("instance")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperInstanceName()).andReturn(null);
    }
    if (onlyHosts) {
      expect(opts.getZooKeeperHosts()).andReturn("host3,host4");
      expect(clientConf.withZkHosts("host3,host4")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperHosts()).andReturn(null);
    }
  }
  replay(opts);

  // Without an instance name from options, the shell falls back to the site config.
  if (!onlyInstance) {
    expect(clientConf.get(ClientProperty.INSTANCE_NAME)).andReturn(null);
  }

  // The shell validates the Accumulo configuration; stub the static sanity check.
  mockStatic(ConfigSanityCheck.class);
  ConfigSanityCheck.validate(EasyMock.<AccumuloConfiguration>anyObject());
  expectLastCall().atLeastOnce();
  replay(ConfigSanityCheck.class);

  if (!onlyHosts) {
    // ZK hosts must be read from the site configuration when not given via options.
    expect(clientConf.containsKey(Property.INSTANCE_ZK_HOST.getKey()))
        .andReturn(true)
        .atLeastOnce();
    expect(clientConf.getString(Property.INSTANCE_ZK_HOST.getKey()))
        .andReturn("host1,host2")
        .atLeastOnce();
    expect(clientConf.withZkHosts("host1,host2")).andReturn(clientConf);
  }

  if (!onlyInstance) {
    // Instance lookup falls back to the deprecated DFS dir/URI properties when
    // INSTANCE_VOLUMES is absent.
    expect(clientConf.containsKey(Property.INSTANCE_VOLUMES.getKey()))
        .andReturn(false)
        .atLeastOnce();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_DIR_KEY = Property.INSTANCE_DFS_DIR.getKey();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_URI_KEY = Property.INSTANCE_DFS_URI.getKey();
    expect(clientConf.containsKey(INSTANCE_DFS_DIR_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.containsKey(INSTANCE_DFS_URI_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_URI_KEY)).andReturn("hdfs://nn1").atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_DIR_KEY)).andReturn("/dfs").atLeastOnce();
  }

  UUID randomUUID = null;
  if (!onlyInstance) {
    // The instance ID is resolved from HDFS; stub the static lookup to return a fresh UUID.
    mockStatic(ZooUtil.class);
    randomUUID = UUID.randomUUID();
    expect(
            ZooUtil.getInstanceIDFromHdfs(
                anyObject(Path.class), anyObject(AccumuloConfiguration.class)))
        .andReturn(randomUUID.toString());
    replay(ZooUtil.class);
    expect(clientConf.withInstance(randomUUID)).andReturn(clientConf);
  }
  replay(clientConf);

  // The shell must construct exactly one ZooKeeperInstance from the prepared config.
  ZooKeeperInstance theInstance = createMock(ZooKeeperInstance.class);
  expectNew(ZooKeeperInstance.class, clientConf).andReturn(theInstance);
  replay(theInstance, ZooKeeperInstance.class);

  shell.setInstance(opts);

  verify(theInstance, ZooKeeperInstance.class);
}