/** Start executing the program. */
@Execute
public void stepStartExecution(final RequestMonitor rm) {
    if (fBackend.getSessionType() != SessionType.CORE) {
        // Overwrite the program name to use the binary name that was specified.
        // This is important for multi-process.
        // Bug 342351
        fAttributes.put(ICDTLaunchConfigurationConstants.ATTR_PROGRAM_NAME, fBinaryName);

        fProcService.start(getContainerContext(), fAttributes,
                new DataRequestMonitor<IContainerDMContext>(ImmediateExecutor.getInstance(), rm) {
                    @Override
                    protected void handleSuccess() {
                        assert getData() instanceof IMIContainerDMContext;

                        // Set the container that we created
                        setContainerContext(DMContexts.getAncestorOfType(getData(), IMIContainerDMContext.class));
                        fDataRequestMonitor.setData(getContainerContext());

                        // Don't call fDataRequestMonitor.done(); the sequence will
                        // automatically do that when it completes.
                        rm.done();
                    }
                });
    } else {
        fDataRequestMonitor.setData(getContainerContext());
        rm.done();
    }
}
/**
 * This method does the necessary work to set up the input/output streams for the inferior process,
 * by either preparing the PTY to be used, or by simply leaving the PTY null, which indicates that
 * the input/output streams of the CLI should be used instead; this decision is based on the type
 * of session.
 */
@Execute
public void stepInitializeInputOutput(final RequestMonitor rm) {
    if (fBackend.getSessionType() == SessionType.REMOTE && !fBackend.getIsAttachSession()) {
        // Remote non-attach sessions don't support multi-process and therefore will not
        // start new processes. Those sessions will only start the one process, which should
        // not have a console, because its output is handled by the GDB server.
        fPty = null;
        rm.done();
    } else {
        // Every other type of session that can get to this code is starting a new process
        // and requires a PTY for it.
        try {
            fPty = new PTY();

            // Tell GDB to use this PTY
            fCommandControl.queueCommand(
                    fCommandFactory.createMIInferiorTTYSet((IMIContainerDMContext) getContainerContext(),
                            fPty.getSlaveName()),
                    new ImmediateDataRequestMonitor<MIInfo>(rm) {
                        @Override
                        protected void handleFailure() {
                            // We were not able to tell GDB to use the PTY,
                            // so we won't use it at all.
                            fPty = null;
                            rm.done();
                        }
                    });
        } catch (IOException e) {
            fPty = null;
            rm.done();
        }
    }
}
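// Illustrative sketch (standalone, not part of the class above): the essence of the PTY decision,
// with the GDB side shown as the console command that the queued MI request corresponds to.
// PTY and getSlaveName() come from org.eclipse.cdt.utils.pty; the class and method names below
// are made up for the example.
import java.io.IOException;
import org.eclipse.cdt.utils.pty.PTY;

class InferiorTtySketch {
    /** Returns the PTY slave name to hand to GDB, or null to fall back to the CLI streams. */
    static String chooseInferiorTty() {
        try {
            PTY pty = new PTY();
            // GDB console equivalent of the MI command queued in stepInitializeInputOutput():
            //   set inferior-tty <slave>     (MI: -inferior-tty-set <slave>)
            return pty.getSlaveName();
        } catch (IOException e) {
            // No PTY available on this host: mirror the fPty = null path and let the
            // inferior's I/O go through the CLI streams instead.
            return null;
        }
    }
}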
/** @return true if fetching OS info from the /proc pseudo-filesystem is supported */
private boolean supportsProcPseudoFS() {
    if (Platform.getOS().equals(Platform.OS_LINUX)) {
        return true;
    }

    // For a non-Linux host, support only remote (presumably Linux) targets
    if (SessionType.REMOTE == fBackend.getSessionType()) {
        return true;
    }

    return false;
}
/**
 * Start tracking the breakpoints. Note that for remote debugging we should first connect to the
 * target.
 */
@Execute
public void stepStartTrackingBreakpoints(RequestMonitor rm) {
    if (fBackend.getSessionType() != SessionType.CORE) {
        MIBreakpointsManager bpmService = fTracker.getService(MIBreakpointsManager.class);
        IBreakpointsTargetDMContext bpTargetDmc =
                DMContexts.getAncestorOfType(getContainerContext(), IBreakpointsTargetDMContext.class);

        bpmService.startTrackingBreakpoints(bpTargetDmc, rm);
    } else {
        rm.done();
    }
}
/**
 * This method indicates if we should use the -exec-continue command instead of the -exec-run
 * command. This method can be overridden to allow for customization.
 */
protected boolean useContinueCommand() {
    // Note that restart does not apply to remote sessions
    IGDBBackend backend = fTracker.getService(IGDBBackend.class);
    if (backend == null) {
        return false;
    }
    // When doing remote non-attach debugging, we use -exec-continue instead of -exec-run.
    // For remote attach, if we get here it means we are starting a new process
    // (multi-process), so we want to use -exec-run.
    return backend.getSessionType() == SessionType.REMOTE && !backend.getIsAttachSession();
}
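// Illustrative sketch (hypothetical subclass, not from the source): since useContinueCommand() is
// protected, an extender whose targets always come up with the program already loaded and halted
// under a remote stub could force -exec-continue. 'StartProcessSequenceBase' is a made-up name for
// whatever class in the hierarchy declares useContinueCommand().
class AlwaysContinueStartSequence extends StartProcessSequenceBase {
    @Override
    protected boolean useContinueCommand() {
        // The inferior is already started on the target side; resuming it with
        // -exec-continue is the only sensible action, so ignore the attach flag.
        return true;
    }
}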
/**
 * If we are dealing with a remote debugging session, connect to the target.
 *
 * @since 4.0
 */
@Execute
public void stepRemoteConnection(RequestMonitor rm) {
    // If we are dealing with a non-attach remote session, it is now time to connect
    // to the remote side. Note that this is the 'target remote' case
    // and not the 'target extended-remote' case (remote attach session).
    // This step is actually global for GDB. However, we have to do it after
    // we have specified the executable, so we have to do it here.
    // It is safe to do it here because a 'target remote' does not support
    // multi-process, so this step will not be executed more than once.
    if (fBackend.getSessionType() == SessionType.REMOTE && !fBackend.getIsAttachSession()) {
        boolean isTcpConnection = CDebugUtils.getAttribute(fAttributes,
                IGDBLaunchConfigurationConstants.ATTR_REMOTE_TCP, false);

        if (isTcpConnection) {
            String remoteTcpHost = CDebugUtils.getAttribute(fAttributes,
                    IGDBLaunchConfigurationConstants.ATTR_HOST, INVALID);
            String remoteTcpPort = CDebugUtils.getAttribute(fAttributes,
                    IGDBLaunchConfigurationConstants.ATTR_PORT, INVALID);

            fCommandControl.queueCommand(
                    fCommandFactory.createMITargetSelect(fCommandControl.getContext(),
                            remoteTcpHost, remoteTcpPort, false),
                    new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
        } else {
            String serialDevice = CDebugUtils.getAttribute(fAttributes,
                    IGDBLaunchConfigurationConstants.ATTR_DEV, INVALID);

            fCommandControl.queueCommand(
                    fCommandFactory.createMITargetSelect(fCommandControl.getContext(), serialDevice, false),
                    new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
        }
    } else {
        rm.done();
    }
}
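// Illustrative note (not from the source; host, port and device values are examples only):
// createMITargetSelect() builds a '-target-select' MI command, so the two branches above
// correspond roughly to these GDB console commands:
//
//   TCP connection:     target remote 192.168.0.5:2345
//   Serial connection:  target remote /dev/ttyS0
//
// The extended-remote variant ('target extended-remote ...') is used for the remote attach
// case, which this step deliberately skips.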
/**
 * If we are dealing with a postmortem session, connect to the core/trace file.
 *
 * @since 4.0
 */
@Execute
public void stepSpecifyCoreFile(final RequestMonitor rm) {
    // If we are dealing with a postmortem session, it is now time to connect
    // to the core/trace file. We have to do this step after
    // we have specified the executable, so we have to do it here.
    // It is safe to do it here because a postmortem session does not support
    // multi-process, so this step will not be executed more than once.
    // Bug 338730
    if (fBackend.getSessionType() == SessionType.CORE) {
        String coreFile = CDebugUtils.getAttribute(fAttributes,
                ICDTLaunchConfigurationConstants.ATTR_COREFILE_PATH, ""); //$NON-NLS-1$
        final String coreType = CDebugUtils.getAttribute(fAttributes,
                IGDBLaunchConfigurationConstants.ATTR_DEBUGGER_POST_MORTEM_TYPE,
                IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TYPE_DEFAULT);

        if (coreFile.length() == 0) {
            new PromptForCoreJob("Prompt for post mortem file", //$NON-NLS-1$
                    new DataRequestMonitor<String>(getExecutor(), rm) {
                        @Override
                        protected void handleCancel() {
                            rm.cancel();
                            rm.done();
                        }

                        @Override
                        protected void handleSuccess() {
                            String newCoreFile = getData();
                            if (newCoreFile == null || newCoreFile.length() == 0) {
                                rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                                        "Cannot get post mortem file path", null)); //$NON-NLS-1$
                                rm.done();
                            } else {
                                if (coreType.equals(IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_CORE_FILE)) {
                                    fCommandControl.queueCommand(
                                            fCommandFactory.createMITargetSelectCore(fCommandControl.getContext(),
                                                    newCoreFile),
                                            new DataRequestMonitor<MIInfo>(getExecutor(), rm));
                                } else if (coreType
                                        .equals(IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TRACE_FILE)) {
                                    IGDBTraceControl traceControl = fTracker.getService(IGDBTraceControl.class);
                                    if (traceControl != null) {
                                        ITraceTargetDMContext targetDmc = DMContexts.getAncestorOfType(
                                                fCommandControl.getContext(), ITraceTargetDMContext.class);
                                        traceControl.loadTraceData(targetDmc, newCoreFile, rm);
                                    } else {
                                        rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                                                "Tracing not supported", null));
                                        rm.done();
                                    }
                                } else {
                                    rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                                            "Invalid post-mortem type", null));
                                    rm.done();
                                }
                            }
                        }
                    }).schedule();
        } else {
            if (coreType.equals(IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_CORE_FILE)) {
                fCommandControl.queueCommand(
                        fCommandFactory.createMITargetSelectCore(fCommandControl.getContext(), coreFile),
                        new DataRequestMonitor<MIInfo>(getExecutor(), rm));
            } else if (coreType.equals(IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TRACE_FILE)) {
                IGDBTraceControl traceControl = fTracker.getService(IGDBTraceControl.class);
                if (traceControl != null) {
                    ITraceTargetDMContext targetDmc = DMContexts.getAncestorOfType(
                            fCommandControl.getContext(), ITraceTargetDMContext.class);
                    traceControl.loadTraceData(targetDmc, coreFile, rm);
                } else {
                    rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1, "Tracing not supported", null));
                    rm.done();
                }
            } else {
                rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1, "Invalid post-mortem type", null));
                rm.done();
            }
        }
    } else {
        rm.done();
    }
}
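// Illustrative note (not from the source; file paths are examples only, and the mapping assumes the
// MI factory calls above produce the usual GDB commands): once the postmortem file is known, the
// two supported types amount roughly to these GDB console commands:
//
//   Core file:   target core /tmp/core.12345    (via createMITargetSelectCore)
//   Trace file:  target tfile /tmp/trace.dat    (via IGDBTraceControl.loadTraceData)
//
// The prompt path only differs in that the file name is obtained from the user before one of
// these is issued.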
/** * This method processes "load info" requests. The load is computed using a sampling method; two * readings of a local or remote /proc/stat file are done with a delay in between. Then the load * is computed from the two samples, for all CPUs/cores known in the system. * * <p>Because of the method used, it's possible that fast variations in CPU usage will be missed. * However longer load trends should be reflected in the results. * * <p>To avoid generating too much load in the remote case, there is a cache that will return the * already computed load, if requested multiple times in a short period. There is also a mechanism * to queue subsequent requests if one is ongoing. Upon completion of the ongoing request, any * queued request is answered with the load that was just computed. * * @since 4.2 */ @Override public void getLoadInfo(final IDMContext context, final DataRequestMonitor<ILoadInfo> rm) { if (!(context instanceof ICoreDMContext) && !(context instanceof ICPUDMContext)) { // we only support getting the load for a CPU or a core rm.done( new Status( IStatus.ERROR, GdbPlugin.PLUGIN_ID, INVALID_HANDLE, "Load information not supported for this context type", null)); //$NON-NLS-1$ return; } // The measurement interval should be of a minimum length to be meaningful assert (LOAD_SAMPLE_DELAY >= 100); // so the cache is useful assert (LOAD_CACHE_LIFETIME >= LOAD_SAMPLE_DELAY); // This way of computing the CPU load is only applicable to Linux if (!supportsProcPseudoFS()) { rm.done( new Status( IStatus.ERROR, GdbPlugin.PLUGIN_ID, NOT_SUPPORTED, "Operation not supported", null)); //$NON-NLS-1$ return; } // Is a request is already ongoing? if (fLoadRequestOngoing) { // queue current new request fLoadInfoRequestCache.put(context, rm); return; } // no request ongoing, so proceed fLoadRequestOngoing = true; // caching mechanism to keep things sane, even if the views(s) // request load information very often. long currentTime = System.currentTimeMillis(); // time to fetch fresh load information? if (fLastCpuLoadRefresh + LOAD_CACHE_LIFETIME < currentTime) { fLastCpuLoadRefresh = currentTime; } else { // not time yet... re-use cached load data processLoads(context, rm, fCachedLoads); fLoadRequestOngoing = false; return; } final ProcStatParser procStatParser = new ProcStatParser(); final ICommandControlDMContext dmc = DMContexts.getAncestorOfType(context, ICommandControlDMContext.class); final String statFile = "/proc/stat"; // $NON-NLS-1$ final String localFile = sTempFolder + "proc.stat." + getSession().getId(); // $NON-NLS-1$ // Remote debugging? We will ask GDB to get us the /proc/stat file from target, twice, with a // delay between. 
    if (fBackend.getSessionType() == SessionType.REMOTE) {
        fCommandControl.queueCommand(fCommandFactory.createCLIRemoteGet(dmc, statFile, localFile),
                new ImmediateDataRequestMonitor<MIInfo>(rm) {
                    @Override
                    protected void handleCompleted() {
                        if (!isSuccess()) {
                            fLoadRequestOngoing = false;
                            rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                                    "Can't get load info for CPU", null)); //$NON-NLS-1$
                            return;
                        }

                        // Success - parse the first set of stat counters
                        try {
                            procStatParser.parseStatFile(localFile);
                        } catch (Exception e) {
                            rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                                    "Can't get load info for CPU", null)); //$NON-NLS-1$
                            fLoadRequestOngoing = false;
                            return;
                        }
                        // Delete the temp file
                        new File(localFile).delete();

                        getExecutor().schedule(new Runnable() {
                            @Override
                            public void run() {
                                fCommandControl.queueCommand(
                                        fCommandFactory.createCLIRemoteGet(dmc, statFile, localFile),
                                        new ImmediateDataRequestMonitor<MIInfo>(rm) {
                                            @Override
                                            protected void handleCompleted() {
                                                if (!isSuccess()) {
                                                    fLoadRequestOngoing = false;
                                                    rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID,
                                                            INTERNAL_ERROR, "Can't get load info for CPU", //$NON-NLS-1$
                                                            null));
                                                    return;
                                                }

                                                // Success - parse the second set of stat counters
                                                // and compute loads
                                                try {
                                                    procStatParser.parseStatFile(localFile);
                                                } catch (Exception e) {
                                                    rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID,
                                                            INTERNAL_ERROR, "Can't get load info for CPU", //$NON-NLS-1$
                                                            null));
                                                    fLoadRequestOngoing = false;
                                                    return;
                                                }
                                                // Delete the temp file
                                                new File(localFile).delete();

                                                // Compute the load
                                                fCachedLoads = procStatParser.getCpuLoad();
                                                processLoads(context, rm, fCachedLoads);

                                                // Done with this request
                                                fLoadRequestOngoing = false;

                                                // Process any queued requests
                                                for (Entry<IDMContext, DataRequestMonitor<ILoadInfo>> e : fLoadInfoRequestCache
                                                        .entrySet()) {
                                                    processLoads(e.getKey(), e.getValue(), fCachedLoads);
                                                }
                                                fLoadInfoRequestCache.clear();
                                            }
                                        });
                            }
                        }, LOAD_SAMPLE_DELAY, TimeUnit.MILLISECONDS);
                    }
                });
    }
    // Local debugging? Then we can read /proc/stat directly
    else {
        // Read the /proc/stat file for the first time
        try {
            procStatParser.parseStatFile(statFile);
        } catch (Exception e) {
            rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                    "Can't get load info for CPU", null)); //$NON-NLS-1$
            fLoadRequestOngoing = false;
            return;
        }

        // Read the /proc/stat file again after a delay
        getExecutor().schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    procStatParser.parseStatFile(statFile);
                } catch (Exception e) {
                    rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                            "Can't get load info for CPU", null)); //$NON-NLS-1$
                    fLoadRequestOngoing = false;
                    return;
                }

                // Compute the load
                fCachedLoads = procStatParser.getCpuLoad();
                processLoads(context, rm, fCachedLoads);

                // Done with this request
                fLoadRequestOngoing = false;

                // Process any queued requests
                for (Entry<IDMContext, DataRequestMonitor<ILoadInfo>> e : fLoadInfoRequestCache.entrySet()) {
                    processLoads(e.getKey(), e.getValue(), fCachedLoads);
                }
                fLoadInfoRequestCache.clear();
            }
        }, LOAD_SAMPLE_DELAY, TimeUnit.MILLISECONDS);
    }
}
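// Illustrative sketch (standalone, not from the source): how a load figure can be derived from two
// /proc/stat samples, which is presumably what ProcStatParser.getCpuLoad() boils down to for the
// aggregate "cpu " line. The class and method names below are hypothetical, and a blocking sleep is
// used here for simplicity where the service above schedules the second read on its executor.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SimpleProcStatSampler {

    /** Reads the tick counters of the aggregate "cpu " line of /proc/stat. */
    private static long[] readCpuCounters(String statFile) throws IOException {
        for (String line : Files.readAllLines(Paths.get(statFile))) {
            if (line.startsWith("cpu ")) {
                String[] fields = line.trim().split("\\s+");
                long[] counters = new long[fields.length - 1];
                for (int i = 1; i < fields.length; i++) {
                    counters[i - 1] = Long.parseLong(fields[i]);
                }
                return counters;
            }
        }
        throw new IOException("No aggregate cpu line in " + statFile);
    }

    /** Load = 100 * (busy tick delta) / (total tick delta) between the two samples. */
    public static int sampleCpuLoadPercent(String statFile, long delayMillis)
            throws IOException, InterruptedException {
        long[] first = readCpuCounters(statFile);
        Thread.sleep(delayMillis);
        long[] second = readCpuCounters(statFile);

        long totalDelta = 0;
        long idleDelta = 0;
        for (int i = 0; i < Math.min(first.length, second.length); i++) {
            totalDelta += second[i] - first[i];
            if (i == 3 || i == 4) { // fields 4 and 5 of the line are idle and iowait
                idleDelta += second[i] - first[i];
            }
        }
        if (totalDelta <= 0) {
            return 0; // no ticks elapsed between the samples; report an idle CPU
        }
        return (int) (100 * (totalDelta - idleDelta) / totalDelta);
    }

    public static void main(String[] args) throws Exception {
        System.out.println("load: " + sampleCpuLoadPercent("/proc/stat", 250) + "%");
    }
}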