/** * The method does the following: * * <p>1.in case of none atomic command * * <p>- execute the command using the command executor * * <p>- notify all waiting thread about the command execution * * <p>2.in case of atomic command * * <p>- execute the command using the command executor * * <p>- notify all waiting thread about the command execution only * * <p>if the current command is the last element in the atomic command * * @param modelCommand * @throws PlanckDBException */ @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") public void doJobs(final Command modelCommand) throws PlanckDBException { boolean consume = true; if (modelCommand.getTransaction() == TRUE && modelCommand.getSessionId() == sessionMetaData.getSessionId()) { consume = false; } if (consume) { commandExecutor.consume(modelCommand); } if (atomicContainer.isPartOfAtomicCommand(modelCommand)) { Command rootCommand = atomicContainer.update(modelCommand); if (rootCommand != null) { synchronized (rootCommand) { rootCommand.notifyAll(); } } } else { synchronized (modelCommand) { modelCommand.notifyAll(); } } // log.debug("message done version : "+modelCommand.getVersion()+" id // "+modelCommand.getEntityId()); }
/**
 * This is the heart of the storage.
 *
 * <p>It controls all incoming commands.
 *
 * <p>The method does the following:
 *
 * <p>1. validate the command (validation itself is still a TODO below)
 *
 * <p>2. update the version number and the conflict number (if needed)
 *
 * <p>3. push the command to the command queue
 *
 * <p>Read-lock commands are handled separately: they either apply/release entity locks in the
 * local registry, or are forwarded over TCP when this node is the command's origin.
 *
 * @param command the incoming command; may be mutated here (schema id, core-manager key, and —
 *     via {@code distributionManager.produceTcp} — presumably its version; TODO confirm)
 * @throws PlanckDBException propagated from distribution or command queueing
 */
public void consume(Command command) throws PlanckDBException {
  // update schema and coreManagerKey in command in case that the schema or coreManagerKey fields
  // in the command are null
  if (command.getSchemaId() < 0) {
    command.setSchemaId(getSessionMetaData().getSchemaId());
  }
  if (command.getCoreManagerKey() == null) {
    command.setCoreManagerKey(getCoreManager().getKey());
  }
  // you do not have to handle your messages which NetworkProtocolType is multicast
  // because you have already handled them: a cast-type command whose origin key and session id
  // both match this node is our own echo, so drop it.
  final NetworkProtocolType type = command.getNetworkProtocolType();
  if (type != null
      && type.isCast()
      && command.getCoreManagerKey().equals(getCoreManager().getKey())
      && sessionMetaData.getSessionId() == command.getSessionId()) {
    return;
  }
  // TODO validate message
  // NOTE(review): isModeCommand() looks like it may be a typo for isModelCommand() — the branch
  // comment says "model change commands"; verify against the Command class.
  if (command.isModeCommand() || command.isAtomicModelCommand()) {
    // model change commands
    // set version number or distribute
    if (command.getVersion() < 0) {
      // Distributing over TCP appears to assign the version as a side effect on the command —
      // TODO confirm produceTcp's contract.
      distributionManager.produceTcp(command);
      int version = command.getVersion();
      // return if command is lock or something was wrong
      if (version < 0 || command.isNotSucceed()) {
        return;
      }
    }
    if (command.isAtomicModelCommand()) {
      // Atomic (composite) command: register the root with its children, then enqueue each
      // child individually.
      List<Command> commands = command.getCommands();
      atomicContainer.register(command, commands);
      for (Command newCommand : commands) {
        commandQueue.pushCommand(newCommand);
      }
    } else {
      commandQueue.pushCommand(command);
    }
  } else {
    // Non-model command: only READ_LOCK_COMMAND is handled here.
    // NOTE(review): commandType is an Integer compared with == — safe only if
    // READ_LOCK_COMMAND is a primitive int constant (unboxing); verify it is not an Integer.
    Integer commandType = command.getCommandType();
    if (commandType == READ_LOCK_COMMAND) {
      if (!command.getCoreManagerKey().equals(coreManager.getKey())) {
        // Lock request originated elsewhere: apply (or release) the locks locally.
        List<Command> commands = command.getCommands();
        boolean lock = command.isLocked();
        for (Command newCommand : commands) {
          int entityId = newCommand.getEntityId();
          int ownerId = newCommand.getOwnerId();
          if (lock) {
            registry.lockEntity(entityId, true, ownerId);
          } else {
            // Releasing: ownership is cleared rather than transferred.
            registry.lockEntity(entityId, false, NON_ENTITY_OWNER);
          }
        }
      } else {
        // Our own lock command: distribute it to the other nodes over TCP.
        distributionManager.produceTcp(command);
      }
    }
  }
}