/** Now, run the program. */
@Execute
public void stepRunProgram(final RequestMonitor rm) {
  ICommand<MIInfo> command;
  if (useContinueCommand()) {
    command = fCommandFactory.createMIExecContinue(fContainerDmc);
  } else {
    command = fCommandFactory.createMIExecRun(fContainerDmc);
  }
  fCommandControl.queueCommand(command, new ImmediateDataRequestMonitor<MIInfo>(rm) {
    @Override
    protected void handleSuccess() {
      // Now that the process is started, the pid has been allocated,
      // so we need to fetch the proper container context.
      // We replace our current context, which does not have the pid,
      // with one that has the pid.
      if (fContainerDmc instanceof IMIContainerDMContext) {
        fContainerDmc = fProcService.createContainerContextFromGroupId(
            fCommandControl.getContext(),
            ((IMIContainerDMContext) fContainerDmc).getGroupId());

        // This is the container context that this sequence is supposed to return: set the dataRm
        fDataRequestMonitor.setData(fContainerDmc);
      } else {
        assert false : "Container context was not an IMIContainerDMContext"; //$NON-NLS-1$
      }
      rm.done();
    }
  });
}
/** Specify the arguments to the program that will be run. */
@Execute
public void stepSetArguments(RequestMonitor rm) {
  try {
    String args = fBackend.getProgramArguments();
    if (args != null) {
      String[] argArray = args.replaceAll("\n", " ").split(" "); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
      fCommandControl.queueCommand(
          fCommandFactory.createMIGDBSetArgs(getContainerContext(), argArray),
          new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
    } else {
      rm.done();
    }
  } catch (CoreException e) {
    rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID,
        IDsfStatusConstants.REQUEST_FAILED, "Cannot get inferior arguments", e)); //$NON-NLS-1$
    rm.done();
  }
}
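// A minimal sketch (not part of the sequence above; values are hypothetical) of how the argument
// string is tokenized by stepSetArguments(): newlines are folded into spaces and the result is
// split on single spaces, so quoting and repeated spaces receive no special treatment.
String args = "--input data.txt\n--verbose";
String[] argArray = args.replaceAll("\n", " ").split(" ");
// argArray is now { "--input", "data.txt", "--verbose" }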
/* (non-Javadoc)
 * @see org.eclipse.cdt.dsf.debug.service.IDisassembly#getInstructions(org.eclipse.cdt.dsf.debug.service.IDisassembly.IDisassemblyDMContext, java.math.BigInteger, java.math.BigInteger, org.eclipse.cdt.dsf.concurrent.DataRequestMonitor)
 */
public void getInstructions(
    IDisassemblyDMContext context,
    BigInteger startAddress,
    BigInteger endAddress,
    final DataRequestMonitor<IInstruction[]> drm) {
  // Validate the context
  if (context == null) {
    drm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
        "Unknown context type", null)); //$NON-NLS-1$
    drm.done();
    return;
  }

  // Go for it
  String start = (startAddress != null) ? startAddress.toString() : "$pc"; //$NON-NLS-1$
  String end = (endAddress != null) ? endAddress.toString() : "$pc + 100"; //$NON-NLS-1$

  fConnection.queueCommand(
      fCommandFactory.createMIDataDisassemble(context, start, end, false),
      new DataRequestMonitor<MIDataDisassembleInfo>(getExecutor(), drm) {
        @Override
        protected void handleSuccess() {
          IInstruction[] result = getData().getMIAssemblyCode();
          drm.setData(result);
          drm.done();
        }
      });
}
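// A minimal usage sketch (assumptions: 'disassembly' is an instance of this service, 'frameCtx'
// is a valid IDisassemblyDMContext and 'executor' is the session executor; these names are
// hypothetical). Passing null for both addresses falls back to the "$pc" / "$pc + 100" defaults
// shown above.
disassembly.getInstructions(frameCtx, null, null,
    new DataRequestMonitor<IInstruction[]>(executor, null) {
      @Override
      protected void handleSuccess() {
        System.out.println("Disassembled " + getData().length + " instructions"); //$NON-NLS-1$ //$NON-NLS-2$
      }
    });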
/* (non-Javadoc)
 * @see org.eclipse.cdt.dsf.debug.service.IDisassembly#getMixedInstructions(org.eclipse.cdt.dsf.debug.service.IDisassembly.IDisassemblyDMContext, java.lang.String, int, int, org.eclipse.cdt.dsf.concurrent.DataRequestMonitor)
 */
public void getMixedInstructions(
    IDisassemblyDMContext context,
    String filename,
    int linenum,
    int lines,
    final DataRequestMonitor<IMixedInstruction[]> drm) {
  // Validate the context
  if (context == null) {
    drm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
        "Unknown context type", null)); //$NON-NLS-1$
    drm.done();
    return;
  }

  // Go for it
  fConnection.queueCommand(
      fCommandFactory.createMIDataDisassemble(context, filename, linenum, lines, true),
      new DataRequestMonitor<MIDataDisassembleInfo>(getExecutor(), drm) {
        @Override
        protected void handleSuccess() {
          IMixedInstruction[] result = getData().getMIMixedCode();
          drm.setData(result);
          drm.done();
        }
      });
}
protected void setSubstitutePaths(
    ISourceLookupDMContext sourceLookupCtx, Map<String, String> entries, RequestMonitor rm) {
  fCachedEntries = entries;
  CountingRequestMonitor countingRm = new CountingRequestMonitor(getExecutor(), rm) {
    @Override
    protected void handleFailure() {
      /*
       * We failed to apply the changes. Clear the cache as it does
       * not represent the state of the backend. However, we don't have
       * a good recovery here, so on future sourceContainersChanged()
       * calls we will simply reissue the substitutions.
       */
      fCachedEntries = null;
      rm.done();
    }
  };
  countingRm.setDoneCount(entries.size());

  for (Map.Entry<String, String> entry : entries.entrySet()) {
    fCommand.queueCommand(
        fCommandFactory.createMISetSubstitutePath(
            sourceLookupCtx, entry.getKey(), entry.getValue()),
        new DataRequestMonitor<MIInfo>(getExecutor(), countingRm));
  }
}
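// A minimal sketch (assumes a DSF executor 'executor' and a parent monitor 'parentRm'; not part
// of the class above) of the CountingRequestMonitor pattern used by setSubstitutePaths(): the
// parent monitor only completes after done() has been signalled the configured number of times,
// one per queued substitute-path command.
CountingRequestMonitor countingRm = new CountingRequestMonitor(executor, parentRm);
countingRm.setDoneCount(3); // e.g. three substitution entries
countingRm.done();          // first command finished
countingRm.done();          // second command finished
countingRm.done();          // third command finished; parentRm now completes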
/**
 * This method does the necessary work to set up the input/output streams for the inferior
 * process, by either preparing the PTY to be used, or by simply leaving the PTY null, which
 * indicates that the input/output streams of the CLI should be used instead; this decision is
 * based on the type of session.
 */
@Execute
public void stepInitializeInputOutput(final RequestMonitor rm) {
  if (fBackend.getSessionType() == SessionType.REMOTE && !fBackend.getIsAttachSession()) {
    // Remote non-attach sessions don't support multi-process and therefore will not
    // start new processes. Those sessions will only start the one process, which should
    // not have a console, because its output is handled by the GDB server.
    fPty = null;
    rm.done();
  } else {
    // Every other type of session that can get to this code is starting a new process
    // and requires a pty for it.
    try {
      fPty = new PTY();

      // Tell GDB to use this PTY
      fCommandControl.queueCommand(
          fCommandFactory.createMIInferiorTTYSet(
              (IMIContainerDMContext) getContainerContext(), fPty.getSlaveName()),
          new ImmediateDataRequestMonitor<MIInfo>(rm) {
            @Override
            protected void handleFailure() {
              // We were not able to tell GDB to use the PTY,
              // so we won't use it at all.
              fPty = null;
              rm.done();
            }
          });
    } catch (IOException e) {
      fPty = null;
      rm.done();
    }
  }
}
/**
 * If reverse debugging, set a breakpoint on main to be able to enable reverse as early as
 * possible. If the user has requested a stop at the same point, we could skip this breakpoint;
 * however, we have to set it first to find out, so we just leave it.
 */
@Execute
public void stepSetBreakpointForReverse(final RequestMonitor rm) {
  if (fReverseEnabled) {
    IBreakpointsTargetDMContext bpTargetDmc =
        DMContexts.getAncestorOfType(getContainerContext(), IBreakpointsTargetDMContext.class);

    fCommandControl.queueCommand(
        fCommandFactory.createMIBreakInsert(
            bpTargetDmc, true, false, null, 0,
            ICDTLaunchConfigurationConstants.DEBUGGER_STOP_AT_MAIN_SYMBOL_DEFAULT, 0),
        new ImmediateDataRequestMonitor<MIBreakInsertInfo>(rm) {
          @Override
          public void handleSuccess() {
            if (getData() != null) {
              MIBreakpoint[] breakpoints = getData().getMIBreakpoints();
              if (breakpoints.length > 0 && fUserBreakpoint != null) {
                fUserBreakpointIsOnMain =
                    breakpoints[0].getAddress().equals(fUserBreakpoint.getAddress());
              }
            }
            rm.done();
          }
        });
  } else {
    rm.done();
  }
}
/**
 * Finally, if we are enabling reverse and the user-specified stop symbol is not on main, we
 * should do a continue, because we are currently stopped on main but that is not what the user
 * requested.
 */
@Execute
public void stepContinue(RequestMonitor rm) {
  if (fReverseEnabled && !fUserBreakpointIsOnMain) {
    fCommandControl.queueCommand(
        fCommandFactory.createMIExecContinue(fContainerDmc),
        new ImmediateDataRequestMonitor<MIInfo>(rm));
  } else {
    rm.done();
  }
}
/**
 * If we are dealing with a remote debugging session, connect to the target.
 *
 * @since 4.0
 */
@Execute
public void stepRemoteConnection(RequestMonitor rm) {
  // If we are dealing with a non-attach remote session, it is now time to connect
  // to the remote side. Note that this is the 'target remote' case
  // and not the 'target extended-remote' case (remote attach session).
  // This step is actually global for GDB. However, we have to do it after
  // we have specified the executable, so we have to do it here.
  // It is safe to do it here because a 'target remote' does not support
  // multi-process, so this step will not be executed more than once.
  if (fBackend.getSessionType() == SessionType.REMOTE && !fBackend.getIsAttachSession()) {
    boolean isTcpConnection = CDebugUtils.getAttribute(
        fAttributes, IGDBLaunchConfigurationConstants.ATTR_REMOTE_TCP, false);

    if (isTcpConnection) {
      String remoteTcpHost = CDebugUtils.getAttribute(
          fAttributes, IGDBLaunchConfigurationConstants.ATTR_HOST, INVALID);
      String remoteTcpPort = CDebugUtils.getAttribute(
          fAttributes, IGDBLaunchConfigurationConstants.ATTR_PORT, INVALID);

      fCommandControl.queueCommand(
          fCommandFactory.createMITargetSelect(
              fCommandControl.getContext(), remoteTcpHost, remoteTcpPort, false),
          new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
    } else {
      String serialDevice = CDebugUtils.getAttribute(
          fAttributes, IGDBLaunchConfigurationConstants.ATTR_DEV, INVALID);

      fCommandControl.queueCommand(
          fCommandFactory.createMITargetSelect(fCommandControl.getContext(), serialDevice, false),
          new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
    }
  } else {
    rm.done();
  }
}
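// A minimal sketch (hypothetical values; not part of the sequence) of the launch-attribute
// entries this step reads for a TCP connection. With attributes like these, the step above
// selects the remote target, corresponding to GDB's 'target remote 192.168.0.10:2345'.
Map<String, Object> attributes = new HashMap<>();
attributes.put(IGDBLaunchConfigurationConstants.ATTR_REMOTE_TCP, true);
attributes.put(IGDBLaunchConfigurationConstants.ATTR_HOST, "192.168.0.10");
attributes.put(IGDBLaunchConfigurationConstants.ATTR_PORT, "2345");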
/** Specify the executable file to be debugged and read the symbol table. */
@Execute
public void stepSetExecutable(RequestMonitor rm) {
  boolean noFileCommand = CDebugUtils.getAttribute(
      fAttributes,
      IGDBLaunchConfigurationConstants.ATTR_DEBUGGER_USE_SOLIB_SYMBOLS_FOR_APP,
      IGDBLaunchConfigurationConstants.DEBUGGER_USE_SOLIB_SYMBOLS_FOR_APP_DEFAULT);

  if (!noFileCommand && fBinaryName != null && fBinaryName.length() > 0) {
    fCommandControl.queueCommand(
        fCommandFactory.createMIFileExecAndSymbols(getContainerContext(), fBinaryName),
        new DataRequestMonitor<MIInfo>(ImmediateExecutor.getInstance(), rm));
  } else {
    rm.done();
  }
}
@Override
public void sourceContainersChanged(
    final ISourceLookupDMContext sourceLookupCtx, final DataRequestMonitor<Boolean> rm) {
  if (!fDirectors.containsKey(sourceLookupCtx)) {
    rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, IDsfStatusConstants.INVALID_HANDLE,
        "No source director configured for given context", null)); //$NON-NLS-1$
    rm.done();
    return;
  }

  Map<String, String> entries = getSubstitutionsPaths(sourceLookupCtx);
  if (entries.equals(fCachedEntries)) {
    rm.done(false);
  } else {
    /*
     * Issue the clear and set commands back to back so that the
     * executor thread atomically changes the source lookup settings.
     * Any commands to GDB issued after this call will get the new
     * source substitute settings.
     */
    CountingRequestMonitor countingRm = new CountingRequestMonitor(getExecutor(), rm) {
      @Override
      protected void handleSuccess() {
        rm.done(true);
      }
    };
    fCommand.queueCommand(
        fCommandFactory.createCLIUnsetSubstitutePath(sourceLookupCtx),
        new DataRequestMonitor<MIInfo>(getExecutor(), countingRm));
    initializeSourceSubstitutions(sourceLookupCtx, new RequestMonitor(getExecutor(), countingRm));
    countingRm.setDoneCount(2);
  }
}
/**
 * If the user requested a 'stopAtMain', let's set the temporary breakpoint where the user
 * specified.
 */
@Execute
public void stepInsertStopOnMainBreakpoint(final RequestMonitor rm) {
  boolean userRequestedStop = CDebugUtils.getAttribute(
      fAttributes,
      ICDTLaunchConfigurationConstants.ATTR_DEBUGGER_STOP_AT_MAIN,
      LaunchUtils.getStopAtMainDefault());

  if (userRequestedStop) {
    String userStopSymbol = CDebugUtils.getAttribute(
        fAttributes,
        ICDTLaunchConfigurationConstants.ATTR_DEBUGGER_STOP_AT_MAIN_SYMBOL,
        LaunchUtils.getStopAtMainSymbolDefault());

    IBreakpointsTargetDMContext bpTargetDmc =
        DMContexts.getAncestorOfType(getContainerContext(), IBreakpointsTargetDMContext.class);

    fCommandControl.queueCommand(
        fCommandFactory.createMIBreakInsert(bpTargetDmc, true, false, null, 0, userStopSymbol, 0),
        new ImmediateDataRequestMonitor<MIBreakInsertInfo>(rm) {
          @Override
          public void handleSuccess() {
            if (getData() != null) {
              MIBreakpoint[] breakpoints = getData().getMIBreakpoints();
              if (breakpoints.length > 0) {
                fUserBreakpoint = breakpoints[0];
              }
            }
            rm.done();
          }
        });
  } else {
    rm.done();
  }
}
/**
 * If we are dealing with a postmortem session, connect to the core/trace file.
 *
 * @since 4.0
 */
@Execute
public void stepSpecifyCoreFile(final RequestMonitor rm) {
  // If we are dealing with a postmortem session, it is now time to connect
  // to the core/trace file. We have to do this step after
  // we have specified the executable, so we have to do it here.
  // It is safe to do it here because a postmortem session does not support
  // multi-process, so this step will not be executed more than once.
  // Bug 338730
  if (fBackend.getSessionType() == SessionType.CORE) {
    String coreFile = CDebugUtils.getAttribute(
        fAttributes, ICDTLaunchConfigurationConstants.ATTR_COREFILE_PATH, ""); //$NON-NLS-1$
    final String coreType = CDebugUtils.getAttribute(
        fAttributes,
        IGDBLaunchConfigurationConstants.ATTR_DEBUGGER_POST_MORTEM_TYPE,
        IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TYPE_DEFAULT);

    if (coreFile.length() == 0) {
      new PromptForCoreJob(
              "Prompt for post mortem file", //$NON-NLS-1$
              new DataRequestMonitor<String>(getExecutor(), rm) {
                @Override
                protected void handleCancel() {
                  rm.cancel();
                  rm.done();
                }

                @Override
                protected void handleSuccess() {
                  String newCoreFile = getData();
                  if (newCoreFile == null || newCoreFile.length() == 0) {
                    rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                        "Cannot get post mortem file path", null)); //$NON-NLS-1$
                    rm.done();
                  } else {
                    if (coreType.equals(
                        IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_CORE_FILE)) {
                      fCommandControl.queueCommand(
                          fCommandFactory.createMITargetSelectCore(
                              fCommandControl.getContext(), newCoreFile),
                          new DataRequestMonitor<MIInfo>(getExecutor(), rm));
                    } else if (coreType.equals(
                        IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TRACE_FILE)) {
                      IGDBTraceControl traceControl = fTracker.getService(IGDBTraceControl.class);
                      if (traceControl != null) {
                        ITraceTargetDMContext targetDmc = DMContexts.getAncestorOfType(
                            fCommandControl.getContext(), ITraceTargetDMContext.class);
                        traceControl.loadTraceData(targetDmc, newCoreFile, rm);
                      } else {
                        rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                            "Tracing not supported", null));
                        rm.done();
                      }
                    } else {
                      rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
                          "Invalid post-mortem type", null));
                      rm.done();
                    }
                  }
                }
              })
          .schedule();
    } else {
      if (coreType.equals(IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_CORE_FILE)) {
        fCommandControl.queueCommand(
            fCommandFactory.createMITargetSelectCore(fCommandControl.getContext(), coreFile),
            new DataRequestMonitor<MIInfo>(getExecutor(), rm));
      } else if (coreType.equals(
          IGDBLaunchConfigurationConstants.DEBUGGER_POST_MORTEM_TRACE_FILE)) {
        IGDBTraceControl traceControl = fTracker.getService(IGDBTraceControl.class);
        if (traceControl != null) {
          ITraceTargetDMContext targetDmc = DMContexts.getAncestorOfType(
              fCommandControl.getContext(), ITraceTargetDMContext.class);
          traceControl.loadTraceData(targetDmc, coreFile, rm);
        } else {
          rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
              "Tracing not supported", null));
          rm.done();
        }
      } else {
        rm.setStatus(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, -1,
            "Invalid post-mortem type", null));
        rm.done();
      }
    }
  } else {
    rm.done();
  }
}
/**
 * This method processes "load info" requests. The load is computed using a sampling method; two
 * readings of a local or remote /proc/stat file are done with a delay in between. Then the load
 * is computed from the two samples, for all CPUs/cores known in the system.
 *
 * <p>Because of the method used, it's possible that fast variations in CPU usage will be missed.
 * However, longer load trends should be reflected in the results.
 *
 * <p>To avoid generating too much load in the remote case, there is a cache that will return the
 * already computed load, if requested multiple times in a short period. There is also a mechanism
 * to queue subsequent requests if one is ongoing. Upon completion of the ongoing request, any
 * queued request is answered with the load that was just computed.
 *
 * @since 4.2
 */
@Override
public void getLoadInfo(final IDMContext context, final DataRequestMonitor<ILoadInfo> rm) {
  if (!(context instanceof ICoreDMContext) && !(context instanceof ICPUDMContext)) {
    // we only support getting the load for a CPU or a core
    rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INVALID_HANDLE,
        "Load information not supported for this context type", null)); //$NON-NLS-1$
    return;
  }

  // The measurement interval should be of a minimum length to be meaningful
  assert (LOAD_SAMPLE_DELAY >= 100);
  // so the cache is useful
  assert (LOAD_CACHE_LIFETIME >= LOAD_SAMPLE_DELAY);

  // This way of computing the CPU load is only applicable to Linux
  if (!supportsProcPseudoFS()) {
    rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, NOT_SUPPORTED,
        "Operation not supported", null)); //$NON-NLS-1$
    return;
  }

  // Is a request already ongoing?
  if (fLoadRequestOngoing) {
    // queue the new request
    fLoadInfoRequestCache.put(context, rm);
    return;
  }
  // no request ongoing, so proceed
  fLoadRequestOngoing = true;

  // caching mechanism to keep things sane, even if the view(s)
  // request load information very often.
  long currentTime = System.currentTimeMillis();
  // time to fetch fresh load information?
  if (fLastCpuLoadRefresh + LOAD_CACHE_LIFETIME < currentTime) {
    fLastCpuLoadRefresh = currentTime;
  } else {
    // not time yet... re-use cached load data
    processLoads(context, rm, fCachedLoads);
    fLoadRequestOngoing = false;
    return;
  }

  final ProcStatParser procStatParser = new ProcStatParser();
  final ICommandControlDMContext dmc =
      DMContexts.getAncestorOfType(context, ICommandControlDMContext.class);
  final String statFile = "/proc/stat"; //$NON-NLS-1$
  final String localFile = sTempFolder + "proc.stat." + getSession().getId(); //$NON-NLS-1$

  // Remote debugging? We will ask GDB to get us the /proc/stat file from the target,
  // twice, with a delay between the readings.
  if (fBackend.getSessionType() == SessionType.REMOTE) {
    fCommandControl.queueCommand(
        fCommandFactory.createCLIRemoteGet(dmc, statFile, localFile),
        new ImmediateDataRequestMonitor<MIInfo>(rm) {
          @Override
          protected void handleCompleted() {
            if (!isSuccess()) {
              fLoadRequestOngoing = false;
              rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                  "Can't get load info for CPU", null)); //$NON-NLS-1$
              return;
            }

            // Success - parse the first set of stat counters
            try {
              procStatParser.parseStatFile(localFile);
            } catch (Exception e) {
              rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                  "Can't get load info for CPU", null)); //$NON-NLS-1$
              fLoadRequestOngoing = false;
              return;
            }
            // delete temp file
            new File(localFile).delete();

            getExecutor().schedule(new Runnable() {
              @Override
              public void run() {
                fCommandControl.queueCommand(
                    fCommandFactory.createCLIRemoteGet(dmc, statFile, localFile),
                    new ImmediateDataRequestMonitor<MIInfo>(rm) {
                      @Override
                      protected void handleCompleted() {
                        if (!isSuccess()) {
                          fLoadRequestOngoing = false;
                          rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                              "Can't get load info for CPU", null)); //$NON-NLS-1$
                          return;
                        }

                        // Success - parse the second set of stat counters and compute loads
                        try {
                          procStatParser.parseStatFile(localFile);
                        } catch (Exception e) {
                          rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
                              "Can't get load info for CPU", null)); //$NON-NLS-1$
                          fLoadRequestOngoing = false;
                          return;
                        }
                        // delete temp file
                        new File(localFile).delete();

                        // Compute load
                        fCachedLoads = procStatParser.getCpuLoad();
                        processLoads(context, rm, fCachedLoads);

                        // done with request
                        fLoadRequestOngoing = false;

                        // process any queued request
                        for (Entry<IDMContext, DataRequestMonitor<ILoadInfo>> e :
                            fLoadInfoRequestCache.entrySet()) {
                          processLoads(e.getKey(), e.getValue(), fCachedLoads);
                        }
                        fLoadInfoRequestCache.clear();
                      }
                    });
              }
            }, LOAD_SAMPLE_DELAY, TimeUnit.MILLISECONDS);
          }
        });
  } else {
    // Local debugging? Then we can read /proc/stat directly.

    // Read /proc/stat file for the first time
    try {
      procStatParser.parseStatFile(statFile);
    } catch (Exception e) {
      rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
          "Can't get load info for CPU", null)); //$NON-NLS-1$
      fLoadRequestOngoing = false;
      return;
    }

    // Read /proc/stat file again after a delay
    getExecutor().schedule(new Runnable() {
      @Override
      public void run() {
        try {
          procStatParser.parseStatFile(statFile);
        } catch (Exception e) {
          rm.done(new Status(IStatus.ERROR, GdbPlugin.PLUGIN_ID, INTERNAL_ERROR,
              "Can't get load info for CPU", null)); //$NON-NLS-1$
          fLoadRequestOngoing = false;
          return;
        }

        // compute load
        fCachedLoads = procStatParser.getCpuLoad();
        processLoads(context, rm, fCachedLoads);

        // done with request
        fLoadRequestOngoing = false;

        // process any queued request
        for (Entry<IDMContext, DataRequestMonitor<ILoadInfo>> e :
            fLoadInfoRequestCache.entrySet()) {
          processLoads(e.getKey(), e.getValue(), fCachedLoads);
        }
        fLoadInfoRequestCache.clear();
      }
    }, LOAD_SAMPLE_DELAY, TimeUnit.MILLISECONDS);
  }
}
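// A rough sketch (not the actual ProcStatParser implementation, which is not shown here) of how
// a load percentage can be derived from two /proc/stat samples: the counters are cumulative
// ticks, so the load over the sampling interval is the busy delta divided by the total delta.
// 'first' and 'second' hold the tick columns (user, nice, system, idle, ...) of one CPU line
// from the two readings; names and layout are assumptions for illustration only.
static int computeLoadPercent(long[] first, long[] second) {
  long totalDelta = 0;
  for (int i = 0; i < first.length; i++) {
    totalDelta += second[i] - first[i];
  }
  long idleDelta = second[3] - first[3]; // column 3 of a /proc/stat cpu line is idle time
  if (totalDelta == 0) {
    return 0;
  }
  return (int) (100 * (totalDelta - idleDelta) / totalDelta);
}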