public void testSyslogOutputFormat() throws IOException { // set the output format. FlumeConfiguration conf = FlumeConfiguration.get(); conf.set(FlumeConfiguration.COLLECTOR_OUTPUT_FORMAT, "syslog"); // build a sink that outputs to that format. File f = FileUtil.mktempdir(); SinkBuilder builder = EscapedCustomDfsSink.builder(); EventSink snk = builder.build(new Context(), "file:///" + f.getPath() + "/sub-%{service}"); Event e = new EventImpl("this is a test message".getBytes()); Attributes.setString(e, "service", "foo"); snk.open(); snk.append(e); snk.close(); ByteArrayOutputStream exWriter = new ByteArrayOutputStream(); SyslogEntryFormat fmt = new SyslogEntryFormat(); fmt.format(exWriter, e); exWriter.close(); String expected = new String(exWriter.toByteArray()); // check the output to make sure it is what we expected. File fo = new File(f.getPath() + "/sub-foo"); FileReader fr = new FileReader(fo); BufferedReader br = new BufferedReader(fr); String read = br.readLine() + "\n"; assertEquals(expected, read); }
/** * This creates an environment where we have configurations set and then serving starts. This * simulates a zk configstore load and then the serve call being run. * * <p>Ideally we'd create a SetupTranslatingZKMasterTestEnv, but there is an issue when trying to * start/shutdown and start a new master in the same process/jvm. */ @Before public void setCfgAndStartMaster() throws TTransportException, IOException, FlumeSpecException { // Give ZK a temporary directory, otherwise it's possible we'll reload some // old configs tmpdir = FileUtil.mktempdir(); FlumeConfiguration.createTestableConfiguration(); FlumeConfiguration.get().set(FlumeConfiguration.MASTER_STORE, "memory"); buildMaster(); // Instead of loading from a ZK Store, we just see the config in the "deep" // config manager. Any translations will not occur. ConfigurationManager loaded = cfgMan; loaded.setConfig("node1", "flow", "autoCollectorSource", "null"); loaded.setConfig("node2", "flow", "autoCollectorSource", "null"); loaded.setConfig("node3", "flow", "autoCollectorSource", "null"); loaded.setConfig("node4", "flow", "autoCollectorSource", "null"); loaded.setConfig("agent", "flow", "null", "autoBEChain"); // this is the outer configman, should have no translation. ConfigurationManager cfgman1 = flumeMaster.getSpecMan(); Map<String, FlumeConfigData> cfgs1 = cfgman1.getTranslatedConfigs(); assertEquals(0, cfgs1.size()); // no translations happened // start the master (which should trigger an update and translation flumeMaster.serve(); }
/** This also implements the Apache Commons Daemon interface's start */ public synchronized void start() { FlumeConfiguration conf = FlumeConfiguration.get(); // the simple report interface simpleReportManager.add(vmInfo); simpleReportManager.add(sysInfo); simpleReportManager.add( new Reportable() { @Override public String getName() { return FlumeNode.this.getName(); } @Override public ReportEvent getMetrics() { return FlumeNode.this.getReport(); } @Override public Map<String, Reportable> getSubMetrics() { return ReportUtil.noChildren(); } }); // the full report interface ReportManager.get().add(vmInfo); ReportManager.get().add(sysInfo); ReportManager.get().add(this); if (startHttp) { int nodePort = conf.getNodeStatusPort(); String bindAddress = "0.0.0.0"; ContextCreator cc = new ContextCreator() { @Override public void addContexts(ContextHandlerCollection handlers) { handlers.addHandler(InternalHttpServer.createLogAppContext()); handlers.addHandler(InternalHttpServer.createStackSevletContext()); String webAppRoot = FlumeConfiguration.get().getNodeWebappRoot(); InternalHttpServer.addHandlersFromPaths(handlers, new File(webAppRoot)); } }; http = InternalHttpServer.startFindPortHttpServer(cc, bindAddress, nodePort); } if (reportPusher != null) { reportPusher.start(); } if (liveMan != null) { liveMan.start(); } if (chokeMan != null) { // JVM exits if only daemons threads remain. chokeMan.setDaemon(true); chokeMan.start(); } }
/** * Warning - do not use this constructor if you think it has been called anywhere else! This is * also not thread safe. * * <p>TODO(henry): Proper singleton implementation */ public FlumeMaster(FlumeConfiguration cfg, boolean doHttp) { this.cfg = cfg; instance = this; this.uniqueMasterName = "flume-master-" + cfg.getMasterServerId(); this.doHttp = doHttp; this.cmdman = new CommandManager(); ConfigStore cfgStore = createConfigStore(FlumeConfiguration.get()); this.statman = new StatusManager(); // configuration manager translate user entered configs if (FlumeConfiguration.get().getMasterIsDistributed()) { LOG.info("Distributed master, disabling all config translations"); ConfigurationManager base = new ConfigManager(cfgStore); this.specman = base; } else { // TODO (jon) translated configurations cause problems in multi-master // situations. For now we disallow translation. LOG.info("Single master, config translations enabled"); ConfigurationManager base = new ConfigManager(cfgStore); ConfigurationManager flowedFailovers = new FlowConfigManager.FailoverFlowConfigManager(base, statman); this.specman = new LogicalConfigurationManager(flowedFailovers, new ConfigManager(), statman); } if (FlumeConfiguration.get().getMasterIsDistributed()) { this.ackman = new GossipedMasterAckManager(FlumeConfiguration.get()); } else { this.ackman = new MasterAckManager(); } }
@Before public void setCfg() throws IOException { // Isolate tests by only using simple cfg store cfg = FlumeConfiguration.createTestableConfiguration(); cfg.set(FlumeConfiguration.MASTER_STORE, "memory"); cfg.set(FlumeConfiguration.WEBAPPS_PATH, "build/webapps"); }
@Test public void testCheckup() throws IOException { MockClock clk = new MockClock(0); Clock.setClock(clk); StatusManager stats = new StatusManager(); // foo is in state HELLO and has configuration numbered 0 long prev = Clock.unixTime(); boolean needsRefresh = stats.updateHeartbeatStatus(NetUtils.localhost(), "physnode", "foo", NodeState.HELLO, 0); LOG.info(stats.getNodeStatuses()); assertTrue(needsRefresh); // move forward in time, but not far enough to trigger being lost clk.forward(FlumeConfiguration.get().getConfigHeartbeatPeriod() * 5); stats.checkup(); StatusManager.NodeStatus ns = stats.getNodeStatuses().get("foo"); assertEquals(0, ns.version); assertTrue(prev <= ns.lastseen); assertEquals(NodeState.HELLO, ns.state); prev = ns.lastseen; clk.forward(FlumeConfiguration.get().getConfigHeartbeatPeriod() * 20); stats.checkup(); ns = stats.getNodeStatuses().get("foo"); assertEquals(0, ns.version); assertTrue(prev <= ns.lastseen); assertEquals(NodeState.LOST, ns.state); prev = ns.lastseen; LOG.info(ns.toString()); LOG.info(stats.getStatus("foo").toString()); }
public WALManager addWalManager(String walnode) {
  Preconditions.checkArgument(walnode != null);
  // Each WAL node gets its own subdirectory under the agent logs dir.
  File walDir = new File(new File(FlumeConfiguration.get().getAgentLogsDir()), walnode);
  WALManager manager = new NaiveFileWALManager(walDir);
  synchronized (walMans) {
    walMans.put(walnode, manager);
  }
  return manager;
}
public DiskFailoverManager addDFOManager(String dfonode) {
  Preconditions.checkArgument(dfonode != null);
  // Each DFO node gets its own subdirectory under the agent logs dir.
  File dfoDir = new File(new File(FlumeConfiguration.get().getAgentLogsDir()), dfonode);
  DiskFailoverManager manager = new NaiveFileFailoverManager(dfoDir);
  synchronized (failoverMans) {
    failoverMans.put(dfonode, manager);
  }
  return manager;
}
/** Shutdown all the various servers. */
public void shutdown() {
  try {
    if (http != null) {
      try {
        http.stop();
      } catch (Exception e) {
        LOG.error("Error stopping FlumeMaster", e);
      }
      http = null;
    }
    cmdman.stop();
    ackman.stop();
    if (configServer != null) {
      configServer.stop();
      configServer = null;
    }
    if (controlServer != null) {
      controlServer.stop();
      controlServer = null;
    }
    // Close whichever report server was started. NOTE(review): the original
    // compared the RPC type with '==', which for String constants only works
    // by interning; use equals() as MasterAdminServer does.
    if (FlumeConfiguration.RPC_TYPE_AVRO.equals(cfg.getReportServerRPC())) {
      if (avroReportServer != null) {
        avroReportServer.stop();
        avroReportServer = null;
      }
    } else {
      if (thriftReportServer != null) {
        thriftReportServer.stop();
        thriftReportServer = null;
      }
    }
    specman.stop();
    reaper.interrupt();
    // Renamed from 'cfg' to avoid shadowing the field of the same name.
    FlumeConfiguration conf = FlumeConfiguration.get();
    if (conf.getMasterStore().equals(ZK_CFG_STORE)) {
      ZooKeeperService.get().shutdown();
    }
  } catch (Exception e) {
    // Single catch: the original had identical IOException and Exception
    // handlers; IOException is covered by Exception.
    LOG.error("Exception when shutting down master!", e);
  }
}
public static String getWebPath(FlumeConfiguration conf) { String webPath = conf.getWebAppsPath(); File f = new File(webPath); // absolute paths win, but if is not absolute, prefix with flume home if (!f.isAbsolute()) { String basepath = FlumeConfiguration.getFlumeHome(); if (basepath == null) { LOG.warn("FLUME_HOME not set, potential for odd behavior!"); } File base = new File(basepath, webPath); webPath = base.getAbsolutePath(); } return webPath; }
/** Helper function to parse the configuration to decide which kind of config store to start */
public static ConfigStore createConfigStore(FlumeConfiguration cfg) {
  String store = cfg.getMasterStore();
  if (store.equals(ZK_CFG_STORE)) {
    return new ZooKeeperConfigStore();
  }
  if (store.equals(MEMORY_CFG_STORE)) {
    // The in-memory store has no replication, so a distributed master
    // cannot use it.
    if (cfg.getMasterIsDistributed()) {
      throw new IllegalStateException(
          "Can't use non-zookeeper store with " + "distributed Master");
    }
    return new MemoryBackedConfigStore();
  }
  throw new IllegalArgumentException("Unsupported config store: " + cfg.getMasterStore());
}
@Test public void testOpenClose() throws IOException, Exception { // Set directory of webapps to build-specific dir FlumeConfiguration.get().set(FlumeConfiguration.WEBAPPS_PATH, "build/webapps"); FlumeConfiguration conf = FlumeConfiguration.get(); String webPath = FlumeNode.getWebPath(conf); int port = FlumeConfiguration.get().getNodeStatusPort(); StatusHttpServer http = new StatusHttpServer("flumeagent", webPath, "0.0.0.0", port, false); for (int i = 0; i < 50; i++) { http.start(); http.stop(); } }
public CheckpointChecker(S s) {
  super(s);
  // Logging-only ack listener: every lifecycle event is just recorded.
  this.listener =
      new AckListener() {
        @Override
        public void start(String group) {
          LOG.info("start " + group);
        }

        @Override
        public void end(String group) {
          LOG.info("ended " + group);
        }

        @Override
        public void err(String group) {
          LOG.info("erred " + group);
        }

        @Override
        public void expired(String group) throws IOException {
          LOG.info("expired " + group);
        }
      };
  this.port = FlumeConfiguration.get().getCheckPointPort();
}
/** * Checks to make sure that nodes specified at the master get spawned at the node. * * @throws InterruptedException */ @Test public void testZKMasterDecomission() throws IOException, InterruptedException { // use the simple command manger, non-gossip ackmanager cfg.set(FlumeConfiguration.MASTER_STORE, "zookeeper"); master = new FlumeMaster( new CommandManager(), new ConfigManager(), new StatusManager(), new MasterAckManager(), cfg); master.serve(); MasterRPC rpc = new DirectMasterRPC(master); FlumeNode node = new FlumeNode(rpc, false, false); // should have nothing. assertEquals(0, node.getLogicalNodeManager().getNodes().size()); master.getSpecMan().addLogicalNode(NetUtils.localhost(), node.getPhysicalNodeName()); master.getSpecMan().addLogicalNode(NetUtils.localhost(), "bar"); master.getSpecMan().addLogicalNode(NetUtils.localhost(), "baz"); LivenessManager liveMan = node.getLivenessManager(); liveMan.checkLogicalNodes(); // the two added nodes, plus the always present physnode/logical assertEquals(3, node.getLogicalNodeManager().getNodes().size()); }
public FlumeNode(
    FlumeConfiguration conf, String nodeName, MasterRPC rpc, boolean startHttp, boolean oneshot) {
  this.physicalNodeName = nodeName;
  rpcMan = rpc;
  instance = this;
  this.startHttp = startHttp;
  this.nodesMan = new LogicalNodeManager(nodeName);

  // Default WAL and disk-failover managers live in per-physical-node
  // directories under the agent logs dir.
  File defaultDir = new File(conf.getAgentLogsDir(), getPhysicalNodeName());
  this.walMans.put(getPhysicalNodeName(), new NaiveFileWALManager(defaultDir));
  this.failoverMans.put(getPhysicalNodeName(), new NaiveFileFailoverManager(defaultDir));

  this.collectorAck = new CollectorAckListener(rpcMan);

  // no need for liveness tracker if a one shot execution.
  if (oneshot) {
    this.liveMan = null;
    this.reportPusher = null;
  } else {
    this.liveMan = new LivenessManager(nodesMan, rpcMan, new FlumeNodeWALNotifier(this.walMans));
    this.reportPusher = new MasterReportPusher(conf, simpleReportManager, rpcMan);
  }

  // initializing ChokeController
  this.chokeMan = new ChokeManager();

  String reportPrefix = PHYSICAL_NODE_REPORT_PREFIX + this.getPhysicalNodeName() + ".";
  this.vmInfo = new FlumeVMInfo(reportPrefix);
  this.sysInfo = new SystemInfo(reportPrefix);
}
/** * A FlumeNode constructor with pluggable xxxManagers. This is used for debugging and test cases. * The http server is assumed not to be started, and we are not doing a oneshot. */ public FlumeNode( String name, MasterRPC rpc, LogicalNodeManager nodesMan, WALManager walMan, DiskFailoverManager dfMan, CollectorAckListener colAck, LivenessManager liveman) { this.physicalNodeName = name; rpcMan = rpc; instance = this; this.startHttp = false; this.nodesMan = nodesMan; this.walMans.put(getPhysicalNodeName(), walMan); this.failoverMans.put(getPhysicalNodeName(), dfMan); this.collectorAck = colAck; this.liveMan = liveman; // As this is only for the testing puposes, just initialize the physical // node limit to Max Int. this.chokeMan = new ChokeManager(); this.vmInfo = new FlumeVMInfo(PHYSICAL_NODE_REPORT_PREFIX + this.physicalNodeName + "."); this.reportPusher = new MasterReportPusher(FlumeConfiguration.get(), simpleReportManager, rpcMan); this.sysInfo = new SystemInfo(PHYSICAL_NODE_REPORT_PREFIX + this.physicalNodeName + "."); }
/**
 * Build but do not start a master.
 *
 * <p>This exposes a hook to the deepest cfgMan which would ideally be a saved ZK backed version
 * being reloaded from a restarted master.
 */
void buildMaster() throws IOException {
  // Deepest layer: the store-backed config manager.
  cfgMan = new ConfigManager(FlumeMaster.createConfigStore(FlumeConfiguration.get()));

  // Failover chain layer on top of the store.
  FailoverChainManager fcMan = new ConsistentHashFailoverChainManager(3);
  ConfigurationManager failover =
      new FailoverConfigurationManager(cfgMan, new ConfigManager(), fcMan);

  // Logical translation layer on top of failover.
  StatusManager statman = new StatusManager();
  ConfigurationManager logical =
      new LogicalConfigurationManager(failover, new ConfigManager(), statman);

  flumeMaster =
      new FlumeMaster(
          new CommandManager(), logical, statman, new MasterAckManager(),
          FlumeConfiguration.get());
}
/** * This returns true if the host running this process is in the list of master servers. The index * is set in the FlumeConfiguration. If the host doesn't match, false is returned. If the * hostnames in the master server list fail to resolve, an exception is thrown. */ public static boolean inferMasterHostID() throws UnknownHostException, SocketException { String masters = FlumeConfiguration.get().getMasterServers(); String[] mtrs = masters.split(","); int idx = NetUtils.findHostIndex(mtrs); if (idx < 0) { String localhost = NetUtils.localhost(); LOG.error( "Attempted to start a master '{}' that is not " + "in the master servers list: '{}'", localhost, mtrs); // localhost ips weren't in the list. return false; } FlumeConfiguration.get().setInt(FlumeConfiguration.MASTER_SERVER_ID, idx); LOG.info("Inferred master server index {}", idx); return true; }
public MasterAdminServer(FlumeMaster master, FlumeConfiguration config) throws IOException {
  Preconditions.checkArgument(master != null, "FlumeConfigMaster is null in MasterAdminServer!");
  this.master = master;
  this.stubServer = null;
  // Only the Avro RPC framework is supported for the admin server.
  String rpcType = config.getMasterHeartbeatRPC();
  if (FlumeConfiguration.RPC_TYPE_AVRO.equals(rpcType)) {
    stubServer = new MasterAdminServerAvro(this);
  } else {
    throw new IOException("No valid RPC framework specified in config");
  }
}
/** * This function checks the agent logs dir to make sure that the process has the ability to the * directory if necessary, that the path if it does exist is a directory, and that it can in fact * create files inside of the directory. If it fails any of these, it throws an exception. * * <p>Finally, it checks to see if the path is in /tmp and warns the user that this may not be the * best idea. */ public static void nodeConfigChecksOk() throws IOException { // TODO (jon) if we add more checks in here, make the different managers // responsible for throwing an Exception on construction instead. FlumeConfiguration conf = FlumeConfiguration.get(); String s = conf.getAgentLogsDir(); File f = new File(s); if (!FileUtil.makeDirs(f)) { throw new IOException("Path to Log dir cannot be created: '" + s + "'. Check permissions?"); } if (!f.isDirectory()) { throw new IOException("Log dir '" + s + "' already exists as a file. Check log dir path."); } File f2 = null; try { f2 = File.createTempFile("initcheck", ".test", f); } catch (IOException e) { throw new IOException("Failure to write in log directory: '" + s + "'. Check permissions?"); } if (!f2.delete()) { throw new IOException( "Unable to delete " + f2 + " from log directory " + "(but writing succeeded) - something is strange here"); } File tmp = new File("/tmp"); File cur = f; while (cur != null) { if (cur.equals(tmp)) { LOG.warn("Log directory is writing inside of /tmp. This data may not survive reboot!"); break; } cur = cur.getParentFile(); } }
public CheckPointManagerImpl(String logicalNodeName, String baseDir) {
  this.logicalNodeName = logicalNodeName;
  this.baseDir = baseDir;
  this.fileOffsets = new HashMap<String, Long>();
  try {
    readFromFile();
  } catch (IOException e) {
    // Best effort: a missing/corrupt checkpoint file just means starting
    // from empty offsets.
    log.error("Failed to read checkpoint file", e);
  }
  // Checkpoint period is twice the configured heartbeat period.
  t = new CheckpointThread(this.baseDir, FlumeConfiguration.get().getConfigHeartbeatPeriod() * 2);
}
/** * Test to write few log lines, compress using gzip, write to disk, read back the compressed file * and verify the written lines. * * @throws IOException */ public void testGzipOutputFormat() throws IOException { // set the output format. FlumeConfiguration conf = FlumeConfiguration.get(); conf.set(FlumeConfiguration.COLLECTOR_OUTPUT_FORMAT, "syslog"); conf.set(FlumeConfiguration.COLLECTOR_DFS_COMPRESS_GZIP, "true"); // build a sink that outputs to that format. File f = FileUtil.mktempdir(); SinkBuilder builder = EscapedCustomDfsSink.builder(); EventSink snk = builder.build(new Context(), "file:///" + f.getPath() + "/sub-%{service}"); Event e = new EventImpl("this is a test message".getBytes()); Attributes.setString(e, "service", "foo"); snk.open(); snk.append(e); snk.close(); ByteArrayOutputStream exWriter = new ByteArrayOutputStream(); SyslogEntryFormat fmt = new SyslogEntryFormat(); fmt.format(exWriter, e); exWriter.close(); String expected = new String(exWriter.toByteArray()); // check the output to make sure it is what we expected. // read the gzip file and verify the contents GZIPInputStream gzin = new GZIPInputStream(new FileInputStream(f.getPath() + "/sub-foo.gz")); byte[] buf = new byte[1]; StringBuilder output = new StringBuilder(); while ((gzin.read(buf)) > 0) { output.append(new String(buf)); } assertEquals(expected, output.toString()); assertTrue("temp folder successfully deleted", FileUtil.rmr(f)); }
/**
 * Completely generic and pluggable Flume master constructor. Used for test cases. Webserver is by
 * default off.
 */
public FlumeMaster(
    CommandManager cmd,
    ConfigurationManager cfgMan,
    StatusManager stat,
    MasterAckManager ack,
    FlumeConfiguration cfg) {
  instance = this;
  this.cfg = cfg;
  this.uniqueMasterName = "flume-master-" + cfg.getMasterServerId();
  this.doHttp = false;
  this.cmdman = cmd;
  this.specman = cfgMan;
  this.statman = stat;
  this.ackman = ack;
}
/** Create hive notification using defaults from FlumeConfiguration. */
public HiveDirCreatedNotification(String table, String dir, Map<String, String> meta) {
  // Hive connection settings come from the global configuration.
  FlumeConfiguration conf = FlumeConfiguration.get();
  this.host = conf.getHiveHost();
  this.port = conf.getHivePort();
  this.user = conf.getHiveUser();
  this.pw = conf.getHiveUserPW();

  // Notification payload supplied by the caller.
  this.table = table;
  this.dir = dir;
  this.meta = meta;
}
/** Creates a logical node that will accept any configuration */ public LogicalNode(Context ctx, String name) { this.nodeName = name; this.ctx = ctx; // Note: version and lastSeen aren't kept up-to-date on the logical node. // The master fills them in when it receives a NodeStatus heartbeat. state = new NodeStatus( NodeState.HELLO, 0, 0, NetUtils.localhost(), FlumeNode.getInstance().getPhysicalNodeName()); // Set version to -1 so that all non-negative versions will be 'later' lastGoodCfg = new FlumeConfigData( 0, "null", "null", VERSION_INFIMUM, VERSION_INFIMUM, FlumeConfiguration.get().getDefaultFlowName()); }
/** * This stops any existing connection (source=>sink pumper), and then creates a new one with the * specified *already opened* source and sink arguments. */ private void startNodeDriver() throws IOException { if (driver != null) { // stop the existing connector. driver.stop(); try { // default is 30s. long timeout = FlumeConfiguration.get().getNodeCloseTimeout(); if (!driver.join(timeout)) { LOG.error("Forcing driver to exit uncleanly"); driver.cancel(); // taking too long, cancel the thread } } catch (InterruptedException e) { LOG.error("Previous driver took too long to close!", e); } } // this will be replaceable with multi-threaded queueing versions or other // mechanisms driver = new DirectDriver("logicalNode " + nodeName, src, snk); this.state.state = NodeState.ACTIVE; driver.start(); reconfigures.incrementAndGet(); }
/**
 * Attempts a Kerberos login via a keytab if security is enabled in Hadoop.
 *
 * <p>This should be able to support multiple hadoop clusters as long as the particular principal
 * is allowed on multiple clusters.
 *
 * <p>To preserve compatibility with non security enhanced hdfs, we use reflection on various
 * UserGroupInformation and SecurityUtil related method calls. Each reflective step degrades
 * gracefully (logs and returns) when the running Hadoop version lacks the security API.
 */
@SuppressWarnings("unchecked")
static void tryKerberosLogin() throws IOException {
  /*
   * UserGroupInformation is in hadoop 0.18
   * UserGroupInformation.isSecurityEnabled() not in pre security API.
   *
   * boolean useSec = UserGroupInformation.isSecurityEnabled();
   */
  boolean useSec = false;
  try {
    Class<UserGroupInformation> c = UserGroupInformation.class;
    // static call, null this obj
    useSec = (Boolean) c.getMethod("isSecurityEnabled").invoke(null);
  } catch (Exception e) {
    // Pre-security Hadoop: the method does not exist, so bail out quietly.
    LOG.warn(
        "Flume is using Hadoop core "
            + org.apache.hadoop.util.VersionInfo.getVersion()
            + " which does not support Security / Authentication: "
            + e.getMessage());
    return;
  }

  LOG.info("Hadoop Security enabled: " + useSec);
  if (!useSec) {
    return;
  }

  // At this point we know we are using a hadoop library that is kerberos
  // enabled.

  // attempt to load kerberos information for authenticated hdfs comms.
  String principal = FlumeConfiguration.get().getKerberosPrincipal();
  String keytab = FlumeConfiguration.get().getKerberosKeytab();
  LOG.info("Kerberos login as " + principal + " from " + keytab);

  try {
    /*
     * SecurityUtil not present pre hadoop 20.2
     *
     * SecurityUtil.login not in pre-security Hadoop API
     *
     * // Keytab login does not need to auto refresh
     *
     * SecurityUtil.login(FlumeConfiguration.get(),
     * FlumeConfiguration.SECURITY_KERBEROS_KEYTAB,
     * FlumeConfiguration.SECURITY_KERBEROS_PRINCIPAL);
     */
    Class c = Class.forName("org.apache.hadoop.security.SecurityUtil");
    // get method login(Configuration, String, String);
    Method m = c.getMethod("login", Configuration.class, String.class, String.class);
    m.invoke(
        null,
        FlumeConfiguration.get(),
        FlumeConfiguration.SECURITY_KERBEROS_KEYTAB,
        FlumeConfiguration.SECURITY_KERBEROS_PRINCIPAL);
  } catch (Exception e) {
    LOG.error(
        "Flume failed when attempting to authenticate with keytab "
            + FlumeConfiguration.get().getKerberosKeytab()
            + " and principal '"
            + FlumeConfiguration.get().getKerberosPrincipal()
            + "'",
        e);
    // e.getMessage() comes from hadoop is worthless
    return;
  }

  try {
    /*
     * getLoginUser, getAuthenticationMethod, and isLoginKeytabBased are not
     * in Hadoop 20.2, only kerberized enhanced version.
     *
     * getUserName is in all 0.18.3+
     *
     * UserGroupInformation ugi = UserGroupInformation.getLoginUser();
     * LOG.info("Auth method: " + ugi.getAuthenticationMethod());
     * LOG.info(" User name: " + ugi.getUserName());
     * LOG.info(" Using keytab: " +
     * UserGroupInformation.isLoginKeytabBased());
     */
    Class<UserGroupInformation> c2 = UserGroupInformation.class;
    // static call, null this obj
    UserGroupInformation ugi = (UserGroupInformation) c2.getMethod("getLoginUser").invoke(null);
    String authMethod = c2.getMethod("getAuthenticationMethod").invoke(ugi).toString();
    boolean keytabBased = (Boolean) c2.getMethod("isLoginKeytabBased").invoke(ugi);

    // Dump the effective login identity for debugging; failure here is
    // non-fatal since the login above already succeeded.
    LOG.info("Auth method: " + authMethod);
    LOG.info(" User name: " + ugi.getUserName());
    LOG.info(" Using keytab: " + keytabBased);
  } catch (Exception e) {
    LOG.error("Flume was unable to dump kerberos login user" + " and authentication method", e);
    return;
  }
}
/**
 * Returns a Flume Node with settings from specified command line parameters. (See usage for
 * instructions)
 *
 * @param argv command line arguments (see the Options registered below)
 * @return the started FlumeNode, or null when only help/version output was requested or the
 *     command line failed to parse
 * @throws IOException if the node configuration checks fail
 */
public static FlumeNode setup(String[] argv) throws IOException {
  logVersion(LOG);
  logEnvironment(LOG);
  // Make sure the Java version is not older than 1.6
  if (!CheckJavaVersion.isVersionOk()) {
    LOG.error("Exiting because of an old Java version or Java version in bad format");
    System.exit(-1);
  }
  LOG.info("Starting flume agent on: " + NetUtils.localhost());
  LOG.info(" Working directory is: " + new File(".").getAbsolutePath());

  FlumeConfiguration.hardExitLoadConfig(); // will exit if conf file is bad.

  CommandLine cmd = null;
  Options options = new Options();
  options.addOption("c", true, "Load initial config from cmdline arg");
  options.addOption("n", true, "Set node name");
  options.addOption("s", false, "Do not start local flume status server on node");
  options.addOption("1", false, "Make flume node one shot (if closes or errors, exits)");
  options.addOption("m", false, "Have flume hard exit if in likely GC thrash situation");
  options.addOption("h", false, "Print help information");
  options.addOption("v", false, "Print version information");

  try {
    CommandLineParser parser = new PosixParser();
    cmd = parser.parse(options, argv);
  } catch (ParseException e) {
    // Bad command line: show usage and bail without starting a node.
    HelpFormatter fmt = new HelpFormatter();
    fmt.printHelp("FlumeNode", options, true);
    return null;
  }

  // dump version info only
  if (cmd != null && cmd.hasOption("v")) {
    return null;
  }

  // dump help info.
  if (cmd != null && cmd.hasOption("h")) {
    HelpFormatter fmt = new HelpFormatter();
    fmt.printHelp("FlumeNode", options, true);
    return null;
  }

  // Check FlumeConfiguration file for settings that may cause node to fail.
  nodeConfigChecksOk();

  String nodename = NetUtils.localhost(); // default to local host name.
  if (cmd != null && cmd.hasOption("n")) {
    // select a different name, allow for multiple processes configured
    // differently on same node.
    nodename = cmd.getOptionValue("n");
  }

  boolean startHttp = false;
  if (cmd != null && !cmd.hasOption("s")) {
    // no -s option, start the local status server
    startHttp = true;
  }

  boolean oneshot = false;
  if (cmd != null && cmd.hasOption("1")) {
    oneshot = true;
  }

  FormatFactory.loadOutputFormatPlugins();

  // Instantiate the flume node.
  FlumeConfiguration conf = FlumeConfiguration.get();
  // NOTE(review): the (name, conf, ...) argument order differs from the
  // (conf, name, ...) constructor visible elsewhere in this file —
  // presumably a separate overload exists; confirm.
  FlumeNode flume = new FlumeNode(nodename, conf, startHttp, oneshot);

  flume.start();

  // load an initial configuration from command line
  if (cmd != null && cmd.hasOption("c")) {
    String spec = cmd.getOptionValue("c");
    LOG.info("Loading spec from command line: '" + spec + "'");
    try {
      // node name is the default logical and physical name.
      Context ctx = new LogicalNodeContext(nodename, nodename);
      Map<String, Pair<String, String>> cfgs = FlumeBuilder.parseConf(ctx, spec);
      Pair<String, String> node = cfgs.get(nodename);
      FlumeConfigData fcd = new FlumeConfigData(0, node.getLeft(), node.getRight(), 0, 0, null);
      flume.nodesMan.spawn(ctx, nodename, fcd);
    } catch (Exception e) {
      // A bad initial spec is non-fatal unless running in oneshot mode.
      LOG.warn("Caught exception loading node:" + e.getMessage());
      LOG.debug("Exception: ", e);
      if (oneshot) {
        System.exit(0); // exit cleanly
      }
    }
  } else {
    try {
      // default to null configurations.
      Context ctx = new LogicalNodeContext(nodename, nodename);
      FlumeConfigData fcd = new FlumeConfigData(0, "null", "null", 0, 0, null);
      flume.nodesMan.spawn(ctx, nodename, fcd);
    } catch (Exception e) {
      LOG.error("Caught exception loading node", e);
    }
  }

  if (cmd != null && cmd.hasOption("m")) {
    // setup memory use monitor
    LOG.info("Setup hard exit on memory exhaustion");
    MemoryMonitor.setupHardExitMemMonitor(FlumeConfiguration.get().getAgentMemoryThreshold());
  }

  try {
    tryKerberosLogin();
  } catch (IOException ioe) {
    LOG.error("Failed to kerberos login.", ioe);
  }

  // hangout, waiting for other agent thread to exit.
  return flume;
}
/**
 * This hook makes it easy for web apps and jsps to get the current FlumeNode instance. This is
 * used to test the FlumeNode related jsps.
 *
 * <p>Synchronized so concurrent first calls (e.g. from multiple servlet threads) cannot each
 * construct a FlumeNode and race on the singleton field. NOTE(review): constructors elsewhere
 * also assign {@code instance = this} without this lock — confirm those paths are single-threaded.
 *
 * @return the process-wide FlumeNode, lazily created from the global configuration
 */
public static synchronized FlumeNode getInstance() {
  if (instance == null) {
    instance = new FlumeNode(FlumeConfiguration.get());
  }
  return instance;
}
/**
 * Convenience constructor: uses the global {@link FlumeConfiguration} and the local hostname as
 * the physical node name.
 */
public FlumeNode(MasterRPC rpc, boolean startHttp, boolean oneshot) {
  this(FlumeConfiguration.get(), NetUtils.localhost(), rpc, startHttp, oneshot);
}