/**
 * Recompile an execution state.
 *
 * @param state a list of execution states, linked from callee to caller via callerState
 * @param invalidate is this an invalidation?
 * @return the compiled method for the root state
 */
public static CompiledMethod recompileState(ExecutionState state, boolean invalidate) {
  // compile from callee to caller
  CompiledMethod newCM = null;
  do {
    if (!invalidate) {
      newCM = optCompile(state);
    } else {
      newCM = baselineCompile(state);
    }

    if (VM.TraceOnStackReplacement) {
      VM.sysWriteln("new CMID 0x" + Integer.toHexString(newCM.getId()) + "("
          + newCM.getId() + ") for " + newCM.getMethod());
    }

    if (state.callerState == null) break;
    state = state.callerState;
    // set callee_cmid of the caller
    state.callee_cmid = newCM.getId();
  } while (true);

  return newCM;
}
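/*
 * Illustrative sketch (not part of the original source): a minimal,
 * hypothetical driver showing the intended calling pattern for
 * recompileState(). The thread-suspension / state-extraction step is assumed
 * to have happened already; "leafState" stands for the innermost
 * ExecutionState of the inlined call chain.
 */
static CompiledMethod recompileExample(ExecutionState leafState) {
  // false => optimizing recompilation; true would force the baseline compiler
  CompiledMethod rootCM = recompileState(leafState, false);
  // rootCM belongs to the outermost (root) state; each caller state's
  // callee_cmid now records the CMID of its freshly compiled callee.
  return rootCM;
}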
/* Compiles the method with the baseline compiler.
 * 1. generate prologue (PSEUDO_bytecode) from the state.
 * 2. make up new bytecodes with the prologue.
 * 3. set the method's bytecodes to the specialized ones.
 * 4. call BaselineCompilerImpl.compile;
 *    the 'compile' method is customized to process pseudo instructions,
 *    and it will reset the bytecodes to the original ones and adjust
 *    the map from bytecode to the generated machine code, so the
 *    reference map can be generated correctly relying on the original
 *    bytecodes.
 * NOTE: this is different from optCompile, which resets the
 * bytecodes after compilation. I believe this minimizes the
 * work to change both compilers.
 */
public static CompiledMethod baselineCompile(ExecutionState state) {
  NormalMethod method = state.getMethod();

  if (VM.TraceOnStackReplacement) {
    VM.sysWriteln("BASE : starts compiling " + method);
  }

  /* generate prologue bytes */
  byte[] prologue = state.generatePrologue();

  if (VM.TraceOnStackReplacement) {
    VM.sysWriteln("prologue length " + prologue.length);
  }

  // the compiler will call setForOsrSpecialization after generating the reference map
  /* set a flag for specialization; the compiler will see it and
   * know how to handle it properly. */
  method.setForOsrSpecialization(prologue, state.getMaxStackHeight());

  /* for baseline compilation, we do not adjust the exception table and line table
   * because the compiler will generate maps after compilation.
   * Any necessary adjustment should be made during the compilation. */
  CompiledMethod newCompiledMethod = BaselineCompilerImpl.compile(method);

  // the compiled method was already set by BaselineCompilerImpl.compile;
  // the call here does nothing
  // method.finalizeOsrSpecialization();

  // mark the method as a specialized one
  newCompiledMethod.setSpecialForOSR();

  if (VM.TraceOnStackReplacement) {
    // ((BaselineCompiledMethod)newCompiledMethod).printCodeMapEntries();
    VM.sysWriteln("BASE : done, CMID 0x" + Integer.toHexString(newCompiledMethod.getId()) + "("
        + newCompiledMethod.getId() + ") JTOC offset "
        + VM.addressAsHexString(newCompiledMethod.getOsrJTOCoffset().toWord().toAddress()));
  }

  return newCompiledMethod;
}
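/*
 * Condensed sketch (illustrative only, not part of the original source) of
 * the protocol described above: attach the pseudo-bytecode prologue, let the
 * baseline compiler consume it (it restores the original bytecodes itself),
 * then tag the result as OSR-specialized. Tracing and error handling are
 * omitted.
 */
static CompiledMethod specializeAndBaselineCompile(ExecutionState state) {
  NormalMethod m = state.getMethod();
  m.setForOsrSpecialization(state.generatePrologue(), state.getMaxStackHeight());
  CompiledMethod cm = BaselineCompilerImpl.compile(m); // resets the bytecodes internally
  cm.setSpecialForOSR();                               // mark as an OSR-specialized method
  return cm;
}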
protected void executeAction(ExecutionState estate, CatalogDAO c, WorkerGroup wg,
    DBResultConsumer resultConsumer) throws Throwable {
  SQLCommand sql = entities.getCommand(c);
  try {
    if (!sql.isEmpty()) {
      WorkerRequest req =
          new WorkerExecuteRequest(estate.getConnection().getNonTransactionalContext(), sql)
              .onDatabase(database);
      if (CatalogModificationExecutionStep.Action.ALTER == action
          && BroadcastDistributionModel.SINGLETON.equals(optionalModel)) {
        // replicated, need to divide row count by site count
        resultConsumer.setRowAdjuster(BroadcastDistributionModel.SINGLETON.getUpdateAdjuster());
      } else {
        // return sum of rows modified
        resultConsumer.setRowAdjuster(RangeDistributionModel.SINGLETON.getUpdateAdjuster());
      }
      wg.execute(MappingSolution.AllWorkers, req, resultConsumer);
    }
  } catch (Throwable t) {
    throw new Exception(this.getClass().getSimpleName() + ".commitOverride = " + commitOverride, t);
  }
}
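/*
 * Hedged sketch (hypothetical helper, not from the original source) of why
 * the row adjuster above differs by distribution model: for a broadcast
 * (fully replicated) table every site applies the same update, so the raw
 * per-site counts must be divided by the number of sites; for a range
 * distributed table each site touches disjoint rows, so the counts are summed.
 */
static long adjustRowCount(long[] perSiteCounts, boolean broadcast) {
  long total = 0;
  for (long c : perSiteCounts) total += c;
  // e.g. 3 sites each reporting 5 updated rows:
  //   broadcast -> 15 / 3 = 5 logical rows
  //   range     -> 15 summed = 15 logical rows
  return broadcast ? total / perSiteCounts.length : total;
}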
/**
 * 1. generate prologue PSEUDO_bytecode from the state.
 * 2. make new bytecodes with the prologue.
 * 3. set the method's bytecodes to the specialized ones.
 * 4. adjust the exception map and line number map.
 * 5. compile the method.
 * 6. restore the bytecodes, exception map, and line number map to the original ones.
 */
public static CompiledMethod optCompile(ExecutionState state) {
  NormalMethod method = state.getMethod();

  if (VM.TraceOnStackReplacement) {
    VM.sysWriteln("OPT : starts compiling " + method);
  }

  ControllerPlan latestPlan = ControllerMemory.findLatestPlan(method);

  OptOptions _options = null;
  if (latestPlan != null) {
    _options = latestPlan.getCompPlan().options.dup();
  } else {
    // no previous compilation plan: a long-running loop promoted from baseline.
    // this only happens when testing, not in real code
    _options = new OptOptions();
    _options.setOptLevel(0);
  }
  // disable OSR points in the specialized method
  _options.OSR_GUARDED_INLINING = false;

  CompilationPlan compPlan = new CompilationPlan(method,
      (OptimizationPlanElement[]) RuntimeCompiler.optimizationPlan, null, _options);
  // it is also necessary to recompile the current method
  // without OSR.

  /* generate prologue bytes */
  byte[] prologue = state.generatePrologue();
  int prosize = prologue.length;

  method.setForOsrSpecialization(prologue, state.getMaxStackHeight());

  int[] oldStartPCs = null;
  int[] oldEndPCs = null;
  int[] oldHandlerPCs = null;

  /* adjust exception table. */
  {
    // if (VM.TraceOnStackReplacement) { VM.sysWrite("OPT adjust exception table.\n"); }
    ExceptionHandlerMap exceptionHandlerMap = method.getExceptionHandlerMap();

    if (exceptionHandlerMap != null) {
      oldStartPCs = exceptionHandlerMap.getStartPC();
      oldEndPCs = exceptionHandlerMap.getEndPC();
      oldHandlerPCs = exceptionHandlerMap.getHandlerPC();

      int n = oldStartPCs.length;

      int[] newStartPCs = new int[n];
      System.arraycopy(oldStartPCs, 0, newStartPCs, 0, n);
      exceptionHandlerMap.setStartPC(newStartPCs);

      int[] newEndPCs = new int[n];
      System.arraycopy(oldEndPCs, 0, newEndPCs, 0, n);
      exceptionHandlerMap.setEndPC(newEndPCs);

      int[] newHandlerPCs = new int[n];
      System.arraycopy(oldHandlerPCs, 0, newHandlerPCs, 0, n);
      exceptionHandlerMap.setHandlerPC(newHandlerPCs);

      for (int i = 0; i < n; i++) {
        newStartPCs[i] += prosize;
        newEndPCs[i] += prosize;
        newHandlerPCs[i] += prosize;
      }
    }
  }

  CompiledMethod newCompiledMethod =
      RuntimeCompiler.recompileWithOptOnStackSpecialization(compPlan);

  // restore the original bytecodes, exception table, and line number table
  method.finalizeOsrSpecialization();

  {
    ExceptionHandlerMap exceptionHandlerMap = method.getExceptionHandlerMap();
    if (exceptionHandlerMap != null) {
      exceptionHandlerMap.setStartPC(oldStartPCs);
      exceptionHandlerMap.setEndPC(oldEndPCs);
      exceptionHandlerMap.setHandlerPC(oldHandlerPCs);
    }
  }

  // compilation failed because a compilation is already in progress,
  // so fall back to the baseline compiler
  if (newCompiledMethod == null) {
    if (VM.TraceOnStackReplacement) {
      VM.sysWriteln("OPT : failed because compilation is in progress, fall back to baseline");
    }
    return baselineCompile(state);
  }

  // mark the method as a specialized one
  newCompiledMethod.setSpecialForOSR();

  if (VM.TraceOnStackReplacement) VM.sysWriteln("OPT : done\n");

  return newCompiledMethod;
}
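/*
 * Minimal sketch (assumed figures, not from the original source) of the
 * exception-table adjustment in optCompile(): prepending a prologue of
 * prosize bytes shifts every original bytecode index by the same amount,
 * so the copied start/end/handler PCs are offset before compilation and the
 * original arrays are restored afterwards.
 */
static int[] shiftPCs(int[] originalPCs, int prosize) {
  int[] shifted = new int[originalPCs.length];
  for (int i = 0; i < originalPCs.length; i++) {
    // e.g. a handler at PC 12 with a 7-byte prologue ends up at PC 19
    shifted[i] = originalPCs[i] + prosize;
  }
  return shifted;
}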
/**
 * Called by <b>QueryStep</b> to execute the query.
 *
 * @throws Throwable
 */
@Override
public void executeSelf(ExecutionState estate, WorkerGroup wg, DBResultConsumer resultConsumer)
    throws Throwable {
  SSConnection ssCon = estate.getConnection();
  if (ssCon.hasActiveTransaction())
    throw new PEException(
        "Cannot execute DDL within active transaction: " + entities.getCommand(ssCon.getCatalogDAO()));

  boolean noteEntities = logger.isDebugEnabled() && entities != null;

  // Send the catalog changes to the transaction manager so that it can
  // back them out in the event of a catastrophic failure.
  // Add the catalog entries. We have to do this before we execute any SQL because the workers
  // may depend on the catalog being correct. For instance, for a create database, the
  // UserDatabase has to exist in the catalog - otherwise the create database command is sent
  // to the wrong database.
  CatalogDAO c = ssCon.getCatalogDAO();

  if (entities != null && entities.requiresFreshTxn()) c.cleanupRollback();

  String logHeader =
      "(" + (entities == null ? "null" : entities.description()) + ") " + ssCon.getConnectionId();

  boolean success = false;
  boolean sideffects = false;
  CacheInvalidationRecord cacheClear = null;
  int attempts = 0;
  while (!success) {
    if (entities != null) {
      entities.beforeTxn(ssCon, c, wg);
    }
    attempts++;
    c.begin();
    try {
      List<CatalogEntity> entitiesToNotifyOfUpdate = new ArrayList<CatalogEntity>();
      List<CatalogEntity> entitiesToNotifyOfDrop = new ArrayList<CatalogEntity>();

      prepareAction(estate, c, wg, resultConsumer);

      // do the changes to the catalog first because we may be able to
      // restore the data if the ddl operation fails on the actual database
      if (entities != null) {
        entities.inTxn(ssCon, wg);
        cacheClear = entities.getInvalidationRecord();
        QueryPlanner.invalidateCache(cacheClear);
        List<CatalogEntity> temp = entities.getUpdatedObjects();
        if (noteEntities)
          logger.debug(logHeader + " updating: " + Functional.joinToString(temp, ", "));
        for (CatalogEntity catEntity : temp) {
          c.persistToCatalog(catEntity);
          entitiesToNotifyOfUpdate.add(catEntity);
        }
        temp = entities.getDeletedObjects();
        if (noteEntities)
          logger.debug(logHeader + " deleting: " + Functional.joinToString(temp, ", "));
        for (CatalogEntity catEntity : temp) {
          if (catEntity instanceof UserDatabase)
            throw new IllegalArgumentException("Use drop database operation to delete a database");
          List<? extends CatalogEntity> subtemp = catEntity.getDependentEntities(c);
          if (subtemp != null) {
            if (noteEntities)
              logger.debug(
                  logHeader + " deleting subtemp: " + Functional.joinToString(subtemp, ", "));
            for (CatalogEntity dependentEntity : subtemp) {
              c.remove(dependentEntity);
            }
            entitiesToNotifyOfDrop.addAll(subtemp);
          }
          c.remove(catEntity);
          catEntity.removeFromParent();
          entitiesToNotifyOfDrop.add(catEntity);
        }
      }
      // TODO:
      // start a transaction with the transaction manager so that DDL can be
      // registered to back out the DDL we are about to execute in the
      // event of a failure after the DDL is executed but before the txn is committed,
      // or - in the case where the action succeeds but the txn fails at commit
      if (!sideffects) {
        executeAction(estate, c, wg, resultConsumer);
        sideffects = true;
        if (entities != null) entities.onExecute();
      }
      c.commit();
      success = true;
      if (entities != null) entities.onCommit(ssCon, c, wg);
      if (attempts > 1 || logger.isDebugEnabled())
        logger.warn("Successfully committed after " + attempts + " tries");
      for (CatalogEntity updatedEntity : entitiesToNotifyOfUpdate) updatedEntity.onUpdate();
      for (CatalogEntity deletedEntity : entitiesToNotifyOfDrop) deletedEntity.onDrop();
    } catch (Throwable t) {
      logger.debug(logHeader + " while executing", t);
      c.retryableRollback(t);
      onRollback(ssCon, c, wg);
      if (entities == null || !entities.canRetry(t)) {
        logger.warn(
            logHeader + " giving up possibly retryable ddl txn after " + attempts + " tries");
        throw new PEException(t);
      }
      // not really a warning, but it would be nice to get it back out
      logger.warn(logHeader + " retrying ddl after " + attempts + " tries upon exception: "
          + t.getMessage());
    } finally {
      Throwable anything = null;
      try {
        if (cacheClear != null) QueryPlanner.invalidateCache(cacheClear);
        cacheClear = null;
      } catch (Throwable t) {
        // don't rethrow yet - if entities has a finally block we really want it to run
        anything = t;
      }
      try {
        entities.onFinally(ssCon);
      } catch (Throwable t) {
        if (anything == null) anything = t;
      }
      if (anything != null) throw anything;
    }
    postCommitAction(c);
  }
  // Tell the transaction manager that we have executed the catalog
  // changes successfully so that they can be removed from the
  // recovery set.
}
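/*
 * Condensed sketch (hypothetical names, not from the original source) of the
 * retry loop above: catalog changes are made inside a fresh transaction
 * first, the worker-side DDL is sent at most once (guarded by a side-effects
 * flag), and the transaction is retried only when the failure is retryable.
 * RetryableWork is an assumed interface introduced purely for illustration.
 */
interface RetryableWork {
  void updateCatalog(CatalogDAO c) throws Throwable;
  void executeOnWorkers() throws Throwable;
  boolean canRetry(Throwable t);
}

static void retryLoopSketch(CatalogDAO c, RetryableWork work) throws Throwable {
  boolean success = false;
  boolean sideEffects = false;           // ensures the worker-side DDL runs at most once
  while (!success) {
    c.begin();
    try {
      work.updateCatalog(c);             // catalog first, so workers see a consistent catalog
      if (!sideEffects) {
        work.executeOnWorkers();         // the irreversible part
        sideEffects = true;
      }
      c.commit();
      success = true;
    } catch (Throwable t) {
      c.retryableRollback(t);
      if (!work.canRetry(t)) throw t;    // give up on non-retryable failures
    }
  }
}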