private void testSetInstance_ZKInstance(boolean dashZ) throws Exception {
  ClientConfiguration clientConf = createMock(ClientConfiguration.class);
  ShellOptionsJC opts = createMock(ShellOptionsJC.class);
  expect(opts.isFake()).andReturn(false);
  expect(opts.getClientConfiguration()).andReturn(clientConf);
  expect(opts.isHdfsZooInstance()).andReturn(false);
  if (dashZ) {
    expect(clientConf.withInstance("foo")).andReturn(clientConf);
    expect(clientConf.withZkHosts("host1,host2")).andReturn(clientConf);
    List<String> zl = new java.util.ArrayList<String>();
    zl.add("foo");
    zl.add("host1,host2");
    expect(opts.getZooKeeperInstance()).andReturn(zl);
    expectLastCall().anyTimes();
  } else {
    expect(clientConf.withInstance("bar")).andReturn(clientConf);
    expect(clientConf.withZkHosts("host3,host4")).andReturn(clientConf);
    expect(opts.getZooKeeperInstance()).andReturn(Collections.<String>emptyList());
    expect(opts.getZooKeeperInstanceName()).andReturn("bar");
    expect(opts.getZooKeeperHosts()).andReturn("host3,host4");
  }
  replay(clientConf);
  replay(opts);

  ZooKeeperInstance theInstance = createMock(ZooKeeperInstance.class);
  expectNew(ZooKeeperInstance.class, clientConf).andReturn(theInstance);
  replay(theInstance, ZooKeeperInstance.class);

  shell.setInstance(opts);
  verify(theInstance, ZooKeeperInstance.class);
}
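// The test above (and testSetInstance_HdfsZooInstance below) relies on PowerMock
// to intercept `new ZooKeeperInstance(...)` and static calls. A sketch of the
// scaffolding such a test class would need; the class name and the exact
// @PrepareForTest targets are assumptions, not taken from the snippets:
@RunWith(PowerMockRunner.class)
@PrepareForTest({Shell.class, ZooUtil.class, ConfigSanityCheck.class})
public class ShellSetInstanceTest {
  private Shell shell;
  // ... @Before setup creating the Shell under test, plus the test methods shown here
}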
public ClientConfiguration getClientConfiguration()
    throws ConfigurationException, FileNotFoundException {
  ClientConfiguration clientConfig = clientConfigFile == null
      ? ClientConfiguration.loadDefault()
      : new ClientConfiguration(new PropertiesConfiguration(getClientConfigFile()));
  if (useSsl()) {
    clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SSL_ENABLED, "true");
  }
  return clientConfig;
}
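// For context, the snippets below layer connection settings onto a
// ClientConfiguration through its fluent setters, which mutate and return the
// same instance. A minimal sketch; the instance name and host string are
// placeholder values:
ClientConfiguration clientConfig = ClientConfiguration.loadDefault()
    .withInstance("myInstance")         // sets instance.name
    .withZkHosts("zk1:2181,zk2:2181");  // sets instance.zookeeper.host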
/** The run method which sets the configuration and starts the MapReduce job. */
public int run(String[] args) throws Exception {
  if (USE_MINI_ACCUMULO) {
    Connector connector = LocalEnvUtil.getConnector(userPass);
    userName = "******";
    instanceName = connector.getInstance().getInstanceName();
    zookeepers = connector.getInstance().getZooKeepers();
  }

  // Create and initialize a MapReduce Job
  Job job = Job.getInstance(getConf(), "tweetIndexer");
  job.setJarByClass(IndexedDocIndexer.class);

  // Set the AccumuloInputFormat so the mapper can read from Accumulo
  AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));
  AccumuloInputFormat.setInputTableName(job, twitterDataTable);
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations());

  // withInstance/withZkHosts mutate and return the same ClientConfiguration,
  // so ignoring the return values here is harmless.
  ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.withInstance(instanceName);
  clientConfig.withZkHosts(zookeepers);
  AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);

  AccumuloOutputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));
  AccumuloOutputFormat.setCreateTables(job, createTables);
  AccumuloOutputFormat.setDefaultTableName(job, tweetDocIndex);
  AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);

  // Set the map and reduce classes
  job.setMapperClass(TweetMapper.class);
  job.setReducerClass(TweetReducer.class);

  // Set the output key and value classes for the mapper
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);

  // Set the output key and value classes for the reducer
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);

  // Set the InputFormat and OutputFormat for the job
  job.setInputFormatClass(AccumuloInputFormat.class);
  job.setOutputFormatClass(AccumuloOutputFormat.class);

  // Run the MapReduce job and return 0 for success, 1 otherwise
  return job.waitForCompletion(true) ? 0 : 1;
}
@Override
public int run(String[] args) throws Exception {
  String instance = args[0];
  String zookeepers = args[1];
  String user = args[2];
  String tokenFile = args[3];
  String input = args[4];
  String tableName = args[5];

  Job job = Job.getInstance(getConf());
  job.setJobName(TokenFileWordCount.class.getName());
  job.setJarByClass(this.getClass());

  job.setInputFormatClass(TextInputFormat.class);
  TextInputFormat.setInputPaths(job, input);

  job.setMapperClass(MapClass.class);
  job.setNumReduceTasks(0);

  job.setOutputFormatClass(AccumuloOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);

  // AccumuloInputFormat is not used here, but it exposes the same configuration methods.
  AccumuloOutputFormat.setZooKeeperInstance(job,
      ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
  AccumuloOutputFormat.setConnectorInfo(job, user, tokenFile);
  AccumuloOutputFormat.setCreateTables(job, true);
  AccumuloOutputFormat.setDefaultTableName(job, tableName);

  // Propagate job failure through the exit code instead of always returning 0.
  return job.waitForCompletion(true) ? 0 : 1;
}
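// Hypothetical launcher for the Tool above; the argument order matches run():
// instance, zookeepers, user, tokenFile, input, tableName. The values passed
// on the command line are placeholders, not taken from the original code.
public static void main(String[] args) throws Exception {
  // e.g. hadoop jar examples.jar TokenFileWordCount inst zk1:2181 user token.file /input wordcounts
  int exitCode = ToolRunner.run(new Configuration(), new TokenFileWordCount(), args);
  System.exit(exitCode);
}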
/**
 * Set all the initial parameters needed in this class for connectivity out to Accumulo.
 *
 * @param context the job context supplying the Configuration
 */
private void initialize(JobContext context) {
  Configuration conf = context.getConfiguration();
  try {
    // output zoom level
    log.info("Working from zoom level = " + zoomLevel);
    if (zoomLevel == -1) {
      zoomLevel = Integer.parseInt(conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_ZOOMLEVEL));
    }

    table = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_OUTPUT_TABLE);
    username = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_USER);
    instanceName = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_INSTANCE);
    zooKeepers = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_ZOOKEEPERS);

    // Prefer an explicit protection level; otherwise fall back to the
    // configured visibility string, or an empty visibility.
    String pl = conf.get(MrGeoConstants.MRGEO_PROTECTION_LEVEL);
    if (pl != null) {
      colViz = new ColumnVisibility(pl);
    } else if (colViz == null) {
      vizStr = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_VIZ);
      colViz = (vizStr == null) ? new ColumnVisibility() : new ColumnVisibility(vizStr);
    }

    password = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PASSWORD);
    String isEnc = conf.get(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PWENCODED64, "false");
    if (isEnc.equalsIgnoreCase("true")) {
      password = Base64Utils.decodeToString(password);
    }

    if (_innerFormat != null) {
      return;
    }
    _innerFormat = AccumuloOutputFormat.class.newInstance();
    AuthenticationToken token = new PasswordToken(password.getBytes());

    boolean connSet = ConfiguratorBase.isConnectorInfoSet(AccumuloOutputFormat.class, conf);
    if (!connSet) {
      // A Job object is not always available here, so configure the static
      // Configuration directly, the same way Accumulo's formats do internally.
      OutputConfigurator.setConnectorInfo(AccumuloOutputFormat.class, conf, username, token);
      ClientConfiguration cc = ClientConfiguration.loadDefault().withInstance(instanceName);
      cc.setProperty(ClientProperty.INSTANCE_ZK_HOST, zooKeepers);
      OutputConfigurator.setZooKeeperInstance(AccumuloOutputFormat.class, conf, cc);
      OutputConfigurator.setDefaultTableName(AccumuloOutputFormat.class, conf, table);
      OutputConfigurator.setCreateTables(AccumuloOutputFormat.class, conf, true);
      outputInfoSet = true;
    }
  } catch (InstantiationException | IllegalAccessException | AccumuloSecurityException
      | ClassNotFoundException | IOException e) {
    e.printStackTrace();
  }
} // end initialize
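// Hypothetical driver-side counterpart: populating the Configuration keys that
// initialize(JobContext) reads back. The key names come from the method above;
// all values here are placeholders.
Configuration conf = new Configuration();
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_INSTANCE, "accumulo");
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_ZOOKEEPERS, "zk1:2181");
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_USER, "root");
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PASSWORD, "secret");
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PWENCODED64, "false"); // plain-text password
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_OUTPUT_TABLE, "rasters");
conf.set(MrGeoAccumuloConstants.MRGEO_ACC_KEY_ZOOMLEVEL, "10");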
private void testSetInstance_HdfsZooInstance(boolean explicitHdfs, boolean onlyInstance,
    boolean onlyHosts) throws Exception {
  ClientConfiguration clientConf = createMock(ClientConfiguration.class);
  ShellOptionsJC opts = createMock(ShellOptionsJC.class);
  expect(opts.isFake()).andReturn(false);
  expect(opts.getClientConfiguration()).andReturn(clientConf);
  expect(opts.isHdfsZooInstance()).andReturn(explicitHdfs);
  if (!explicitHdfs) {
    expect(opts.getZooKeeperInstance()).andReturn(Collections.<String>emptyList());
    if (onlyInstance) {
      expect(opts.getZooKeeperInstanceName()).andReturn("instance");
      expect(clientConf.withInstance("instance")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperInstanceName()).andReturn(null);
    }
    if (onlyHosts) {
      expect(opts.getZooKeeperHosts()).andReturn("host3,host4");
      expect(clientConf.withZkHosts("host3,host4")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperHosts()).andReturn(null);
    }
  }
  replay(opts);

  if (!onlyInstance) {
    expect(clientConf.get(ClientProperty.INSTANCE_NAME)).andReturn(null);
  }

  mockStatic(ConfigSanityCheck.class);
  ConfigSanityCheck.validate(EasyMock.<AccumuloConfiguration>anyObject());
  expectLastCall().atLeastOnce();
  replay(ConfigSanityCheck.class);

  if (!onlyHosts) {
    expect(clientConf.containsKey(Property.INSTANCE_ZK_HOST.getKey())).andReturn(true)
        .atLeastOnce();
    expect(clientConf.getString(Property.INSTANCE_ZK_HOST.getKey())).andReturn("host1,host2")
        .atLeastOnce();
    expect(clientConf.withZkHosts("host1,host2")).andReturn(clientConf);
  }
  if (!onlyInstance) {
    expect(clientConf.containsKey(Property.INSTANCE_VOLUMES.getKey())).andReturn(false)
        .atLeastOnce();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_DIR_KEY = Property.INSTANCE_DFS_DIR.getKey();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_URI_KEY = Property.INSTANCE_DFS_URI.getKey();
    expect(clientConf.containsKey(INSTANCE_DFS_DIR_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.containsKey(INSTANCE_DFS_URI_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_URI_KEY)).andReturn("hdfs://nn1").atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_DIR_KEY)).andReturn("/dfs").atLeastOnce();
  }

  UUID randomUUID = null;
  if (!onlyInstance) {
    mockStatic(ZooUtil.class);
    randomUUID = UUID.randomUUID();
    expect(ZooUtil.getInstanceIDFromHdfs(anyObject(Path.class),
        anyObject(AccumuloConfiguration.class))).andReturn(randomUUID.toString());
    replay(ZooUtil.class);
    expect(clientConf.withInstance(randomUUID)).andReturn(clientConf);
  }
  replay(clientConf);

  ZooKeeperInstance theInstance = createMock(ZooKeeperInstance.class);
  expectNew(ZooKeeperInstance.class, clientConf).andReturn(theInstance);
  replay(theInstance, ZooKeeperInstance.class);

  shell.setInstance(opts);
  verify(theInstance, ZooKeeperInstance.class);
}