Code example #1
File: InterceptorImpl.java  Project: carrotli/ansql
 private CommonTree parse(String preparedSql) {
   CommonTree result = null;
   ANTLRNoCaseStringStream inputStream = new ANTLRNoCaseStringStream(preparedSql);
   MySQL51Lexer lexer = new MySQL51Lexer(inputStream);
   CommonTokenStream tokens = new CommonTokenStream(lexer);
   lexer.setErrorListener(new QueuingErrorListener(lexer));
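   // getTokens() drives the lexer over the entire input so that any lexing errors are queued before parsing begins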
   tokens.getTokens();
   if (lexer.getErrorListener().hasErrors()) {
     logger.warn(local.message("ERR_Lexing_SQL", preparedSql));
     return result;
   }
   PlaceholderNode.resetId();
   MySQL51Parser parser = new MySQL51Parser(tokens);
   parser.setTreeAdaptor(mySQLTreeAdaptor);
   parser.setErrorListener(new QueuingErrorListener(parser));
   try {
     CommonTree stmtTree = (CommonTree) parser.statement().getTree();
     result = stmtTree;
   } catch (RecognitionException e) {
     logger.warn(local.message("ERR_Parsing_SQL", preparedSql));
   }
   if (parser.getErrorListener().hasErrors()) {
     logger.warn(local.message("ERR_Parsing_SQL", preparedSql));
   }
   return result;
 }
Code example #2
File: InterceptorImpl.java  Project: carrotli/ansql
 private void assertReady() {
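    // until both interceptors are registered, report which one is still missing;
    // once ready, lazily create the SessionFactory from the connection properties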
   if (!ready) {
     if (statementInterceptor == null) {
       throw new ClusterJUserException(local.message("ERR_No_Statement_Interceptor"));
     }
     if (connectionLifecycleInterceptor == null) {
       throw new ClusterJUserException(local.message("ERR_No_Connection_Lifecycle_Interceptor"));
     }
   } else {
     if (sessionFactory == null) {
       sessionFactory = ClusterJHelper.getSessionFactory(properties);
     }
   }
 }
Code example #3
File: ResultDataImpl.java  Project: carrotli/ansql
  public String getString(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    if (ndbRecAttr.isNULL() == 1) return null;
    int prefixLength = storeColumn.getPrefixLength();
    int actualLength;
    int offset = offsets[index];
    byteBuffer.limit(byteBuffer.capacity());
    switch (prefixLength) {
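      // a prefixLength of 1 or 2 means the value is stored behind a little-endian length prefix;
      // (byteBuffer.get(...) + 256) % 256 converts the signed byte read into its unsigned 0-255 value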
      case 0:
        actualLength = lengths[index];
        break;
      case 1:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        offset += 1;
        break;
      case 2:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
        actualLength += 256 * length2;
        offset += 2;
        break;
      default:
        throw new ClusterJFatalInternalException(
            local.message("ERR_Invalid_Prefix_Length", prefixLength));
    }

    byteBuffer.position(offset);
    byteBuffer.limit(offset + actualLength);

    String result = Utility.decode(byteBuffer, storeColumn.getCharsetNumber(), bufferManager);
    byteBuffer.clear();
    return result;
  }
 public void equalLong(Column storeColumn, long value) {
   try {
     ndbOperation.equalLong(storeColumn.getName(), value);
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
 public void equalByte(Column storeColumn, byte b) {
   try {
     ndbOperation.equalInt(storeColumn.getName(), (int) b);
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
 public void setDatetime(Column storeColumn, Timestamp value) {
   try {
     ndbOperation.setDatetime(storeColumn.getName(), value);
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
 public void setNull(Column storeColumn) {
   try {
     ndbOperation.setNull(storeColumn.getName());
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
 public void setByte(Column storeColumn, byte value) {
   try {
     ndbOperation.setInt(storeColumn.getName(), (int) value);
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
 public Blob getBlobHandle(Column storeColumn) {
   try {
     return new BlobImpl(ndbOperation.getBlobHandle(storeColumn.getName()));
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
Code example #10
File: ResultDataImpl.java  Project: carrotli/ansql
 public byte[] getBytes(Column storeColumn) {
   int index = storeColumn.getColumnId();
   NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
   if (ndbRecAttr.isNULL() == 1) return null;
   int prefixLength = storeColumn.getPrefixLength();
   int actualLength = lengths[index];
   int offset = offsets[index];
   switch (prefixLength) {
     case 0:
       break;
     case 1:
       actualLength = (byteBuffer.get(offset) + 256) % 256;
       offset += 1;
       break;
     case 2:
       actualLength = (byteBuffer.get(offset) + 256) % 256;
       int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
       actualLength += 256 * length2;
       offset += 2;
       break;
     default:
       throw new ClusterJFatalInternalException(
           local.message("ERR_Invalid_Prefix_Length", prefixLength));
   }
   byteBuffer.position(offset);
   byte[] result = new byte[actualLength];
   byteBuffer.get(result);
   return result;
 }
 public void equalTime(Column storeColumn, Time value) {
   try {
     Timestamp timestamp = new Timestamp(((Time) value).getTime());
     ndbOperation.equalDatetime(storeColumn.getName(), timestamp);
   } catch (NdbApiException ndbApiException) {
     throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
   }
 }
Code example #12
 /** Return a buffer to the pool. */
 public void returnBuffer(ByteBuffer buffer) {
   //        checkGuard(buffer, "returnBuffer"); // uncomment this to enable checking
   if (buffer.capacity() != bufferSize + guard.length) {
     String message =
         local.message(
             "ERR_Wrong_Buffer_Size_Returned_To_Pool", name, bufferSize, buffer.capacity());
     throw new ClusterJFatalInternalException(message);
   }
   pool.add(buffer);
 }
Code example #13
File: InterceptorImpl.java  Project: carrotli/ansql
 /**
  * Return the interceptor for the statement interceptor callbacks.
  *
  * @param statementInterceptor the statement interceptor
  * @param connection the connection
  * @param properties the connection properties
  * @return the interceptor delegate
  */
 public static InterceptorImpl getInterceptorImpl(
     StatementInterceptor statementInterceptor, Connection connection, Properties properties) {
   InterceptorImpl result = getInterceptorImpl(connection, properties);
   if (result.statementInterceptor != null) {
     throw new ClusterJUserException(local.message("ERR_Duplicate_Statement_Interceptor"));
   }
   result.statementInterceptor = statementInterceptor;
   if (result.connectionLifecycleInterceptor != null) {
     result.ready = true;
   }
   return result;
 }
Code example #14
File: ParameterImpl.java  Project: carrotli/ansql
 public void setProperty(PropertyImpl property) {
   if (this.property != null && this.property.fmd.getType() != property.fmd.getType()) {
     throw new ClusterJUserException(
         local.message(
             "ERR_Multiple_Parameter_Usage",
             parameterName,
             this.property.fmd.getType().getName(),
             property.fmd.getType().getName()));
   } else {
     this.property = property;
   }
 }
Code example #15
 public void setString(Column storeColumn, String value) {
   ByteBuffer stringStorageBuffer = Utility.encode(value, storeColumn, bufferManager);
   int length = stringStorageBuffer.remaining() - storeColumn.getPrefixLength();
   if (length > storeColumn.getLength()) {
     throw new ClusterJUserException(
         local.message(
             "ERR_Data_Too_Long", storeColumn.getName(), storeColumn.getLength(), length));
   }
   int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), stringStorageBuffer);
   bufferManager.clearStringStorageBuffer();
   handleError(returnCode, ndbOperation);
 }
Code example #16
 public void setBytes(Column storeColumn, byte[] value) {
   // TODO use the string storage buffer instead of allocating a new buffer for each value
   int length = value.length;
   if (length > storeColumn.getLength()) {
     throw new ClusterJUserException(
         local.message(
             "ERR_Data_Too_Long", storeColumn.getName(), storeColumn.getLength(), length));
   }
   ByteBuffer buffer = Utility.convertValue(storeColumn, value);
   int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), buffer);
   handleError(returnCode, ndbOperation);
 }
 private int convertBoundType(BoundType type) {
   switch (type) {
     case BoundEQ:
       return NdbIndexScanOperation.BoundType.BoundEQ;
     case BoundGE:
       return NdbIndexScanOperation.BoundType.BoundGE;
     case BoundGT:
       return NdbIndexScanOperation.BoundType.BoundGT;
     case BoundLE:
       return NdbIndexScanOperation.BoundType.BoundLE;
     case BoundLT:
       return NdbIndexScanOperation.BoundType.BoundLT;
     default:
       throw new ClusterJFatalInternalException(
           local.message("ERR_Implementation_Should_Not_Occur"));
   }
 }
Code example #18
 /** Check the guard */
 void checkGuard(ByteBuffer buffer, String where) {
   buffer.clear();
   boolean fail = false;
   // the buffer has guard.length extra bytes in it, initialized with the guard bytes
   buffer.position(buffer.capacity() - guard.length);
   for (int i = 0; i < guard.length; ++i) {
     byte actual = buffer.get();
     byte expected = guard[i];
     if (expected != actual) {
       fail = true;
       logger.warn(
           local.message(
               "WARN_Buffer_Pool_Guard_Check_Failed",
               where,
               (buffer.capacity() - guard.length),
               expected,
               actual,
               buffer.toString()));
     }
   }
   // reset it for next time
   initializeGuard(buffer);
 }
Code example #19
File: ParameterImpl.java  Project: carrotli/ansql
 public Predicate lessEqual(PredicateOperand other) {
   throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
 }
Code example #20
File: ParameterImpl.java  Project: carrotli/ansql
 public Predicate between(PredicateOperand lower, PredicateOperand upper) {
   throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
 }
Code example #21
File: ParameterImpl.java  Project: carrotli/ansql
/** This represents a named parameter that is bound at execution time to a value. */
public class ParameterImpl implements PredicateOperand {

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(ParameterImpl.class);

  /** My logger */
  static final Logger logger = LoggerFactoryService.getFactory().getInstance(ParameterImpl.class);

  /** My domain object. */
  protected QueryDomainTypeImpl<?> dobj;

  /** My property (set when bound) */
  protected PropertyImpl property;

  /** My parameter name */
  protected String parameterName;

  /** Is a value bound to this parameter? */
  protected boolean bound = false;

  /** Is this parameter marked (used in the query)? */
  protected boolean marked = false;

  public ParameterImpl(QueryDomainTypeImpl<?> dobj, String parameterName) {
    this.dobj = dobj;
    this.parameterName = parameterName;
  }

  public void mark() {
    marked = true;
  }

  boolean isMarkedAndUnbound(QueryExecutionContext context) {
    return marked && !context.isBound(parameterName);
  }

  void unmark() {
    marked = false;
  }

  public String getName() {
    return parameterName;
  }

  public Object getParameterValue(QueryExecutionContext context) {
    return property.getParameterValue(context, parameterName);
  }

  public Predicate equal(PredicateOperand predicateOperand) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate between(PredicateOperand lower, PredicateOperand upper) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate greaterThan(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate greaterEqual(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate lessThan(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate lessEqual(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate in(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate like(PredicateOperand other) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate isNull() {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public Predicate isNotNull() {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public void setProperty(PropertyImpl property) {
    if (this.property != null && this.property.fmd.getType() != property.fmd.getType()) {
      throw new ClusterJUserException(
          local.message(
              "ERR_Multiple_Parameter_Usage",
              parameterName,
              this.property.fmd.getType().getName(),
              property.fmd.getType().getName()));
    } else {
      this.property = property;
    }
  }
}
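A minimal sketch of how a named parameter participates in query building and execution, assuming queryDomainType and context are instances of the QueryDomainTypeImpl and QueryExecutionContext types referenced above; the parameter name "customerId" and the method name bindAndRead are hypothetical.

  // Sketch only: illustrates the mark/bind bookkeeping, not the full query pipeline.
  static Object bindAndRead(QueryDomainTypeImpl<?> queryDomainType, QueryExecutionContext context) {
    ParameterImpl param = new ParameterImpl(queryDomainType, "customerId");
    param.mark();                                  // the parameter is referenced by the query
    if (param.isMarkedAndUnbound(context)) {
      return null;                                 // no value has been bound under "customerId" yet
    }
    // getParameterValue assumes setProperty(...) was called during query analysis
    return param.getParameterValue(context);
  }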
Code example #22
File: ResultDataImpl.java  Project: carrotli/ansql
 public Object getObject(Column storeColumn) {
   throw new ClusterJFatalInternalException(local.message("ERR_Implementation_Should_Not_Occur"));
 }
Code example #23
File: ParameterImpl.java  Project: carrotli/ansql
 public Predicate isNotNull() {
   throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
 }
Code example #24
File: ResultDataImpl.java  Project: carrotli/ansql
 public boolean wasNull(Column storeColumn) {
   throw new ClusterJFatalInternalException(local.message("ERR_Implementation_Should_Not_Occur"));
 }
Code example #25
/**
 * This class implements a simple monotonically-growing pool of ByteBuffer. Each NdbRecord has its
 * own pool, including the value NdbRecord (all columns) and each index NdbRecord used for index
 * scans and delete operations.
 */
class FixedByteBufferPoolImpl {

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(FixedByteBufferPoolImpl.class);

  /** My logger */
  static final Logger logger =
      LoggerFactoryService.getFactory().getInstance(FixedByteBufferPoolImpl.class);

  /** The guard initialization bytes; to add a guard after the record, change the array size */
  static byte[] guard = new byte[0];

  static {
    for (int i = 0; i < guard.length; ++i) {
      guard[i] = (byte) 10;
    }
  }

  /** Initialize the guard for a specific buffer. */
  void initializeGuard(ByteBuffer buffer) {
    // the buffer has guard.length extra bytes in it, initialized with the guard bytes array
    buffer.position(buffer.capacity() - guard.length);
    buffer.put(guard);
    buffer.clear();
  }
  /** Check the guard */
  void checkGuard(ByteBuffer buffer, String where) {
    buffer.clear();
    boolean fail = false;
    // the buffer has guard.length extra bytes in it, initialized with the guard bytes
    buffer.position(buffer.capacity() - guard.length);
    for (int i = 0; i < guard.length; ++i) {
      byte actual = buffer.get();
      byte expected = guard[i];
      if (expected != actual) {
        fail = true;
        logger.warn(
            local.message(
                "WARN_Buffer_Pool_Guard_Check_Failed",
                where,
                (buffer.capacity() - guard.length),
                expected,
                actual,
                buffer.toString()));
      }
    }
    // reset it for next time
    initializeGuard(buffer);
  }

  /** The pool of ByteBuffer */
  ConcurrentLinkedQueue<ByteBuffer> pool;

  /** The name of this pool */
  String name;

  /** The length of each buffer */
  int bufferSize;

  /** The high water mark of this pool */
  int highWaterMark = 0;

  /** Construct an empty pool */
  public FixedByteBufferPoolImpl(int bufferSize, String name) {
    this.bufferSize = bufferSize;
    this.name = name;
    this.pool = new ConcurrentLinkedQueue<ByteBuffer>();
    logger.info("FixedByteBufferPoolImpl<init> for " + name + " bufferSize " + bufferSize);
  }

  /** Borrow a buffer from the pool. If none in the pool, create a new one. */
  public ByteBuffer borrowBuffer() {
    ByteBuffer buffer = pool.poll();
    if (buffer == null) {
      buffer = ByteBuffer.allocateDirect(bufferSize + guard.length);
      initializeGuard(buffer);
      if (logger.isDetailEnabled())
        logger.detail(
            "FixedByteBufferPoolImpl for "
                + name
                + " got new  buffer: position "
                + buffer.position()
                + " capacity "
                + buffer.capacity()
                + " limit "
                + buffer.limit());
    } else {
      if (logger.isDetailEnabled())
        logger.detail(
            "FixedByteBufferPoolImpl for "
                + name
                + " got used buffer: position "
                + buffer.position()
                + " capacity "
                + buffer.capacity()
                + " limit "
                + buffer.limit());
    }
    buffer.clear();
    return buffer;
  }

  /** Return a buffer to the pool. */
  public void returnBuffer(ByteBuffer buffer) {
    //        checkGuard(buffer, "returnBuffer"); // uncomment this to enable checking
    if (buffer.capacity() != bufferSize + guard.length) {
      String message =
          local.message(
              "ERR_Wrong_Buffer_Size_Returned_To_Pool", name, bufferSize, buffer.capacity());
      throw new ClusterJFatalInternalException(message);
    }
    pool.add(buffer);
  }
}
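A short usage sketch for the pool above (assuming the calling code sits in the same package as the package-private FixedByteBufferPoolImpl; the pool name, buffer size, and sketch class name are arbitrary): a buffer is borrowed, used for one fixed-size record, and returned with its original capacity.

  // Sketch only: one pool per fixed record size, as described in the class comment.
  import java.nio.ByteBuffer;

  class FixedByteBufferPoolSketch {
    public static void main(String[] args) {
      FixedByteBufferPoolImpl pool = new FixedByteBufferPoolImpl(256, "example-record");
      ByteBuffer buffer = pool.borrowBuffer();  // reuses a pooled buffer or allocates a new direct one
      buffer.putInt(42);                        // write up to bufferSize bytes of record data
      buffer.flip();
      int value = buffer.getInt();
      pool.returnBuffer(buffer);                // capacity must still be bufferSize + guard.length
      System.out.println("read back " + value);
    }
  }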
Code example #26
File: InterceptorImpl.java  Project: carrotli/ansql
/**
 * This class implements the behavior associated with connection callbacks for statement execution
 * and connection lifecycle. There is a clusterj session associated with the interceptor that is
 * used to interact with the cluster. There is exactly one statement interceptor and one connection
 * lifecycle interceptor associated with the interceptor. All of the SQL post-parsing behavior is
 * contained here, and uses classes in the org.antlr.runtime and com.mysql.clusterj.jdbc.antlr
 * packages to perform the parsing of the SQL statement. Analysis of the parsed SQL statement occurs
 * here, and clusterj artifacts are constructed for use in other classes, in particular SQLExecutor
 * and its command-specific subclasses.
 */
public class InterceptorImpl {

  /** Register logger for JDBC stuff. */
  static {
    LoggerFactoryService.getFactory().registerLogger("com.mysql.clusterj.jdbc");
  }

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(InterceptorImpl.class);

  /** My logger */
  static final Logger logger = LoggerFactoryService.getFactory().getInstance(InterceptorImpl.class);

  static Map<String, Executor> parsedSqlMap = new IdentityHashMap<String, Executor>();

  /** The map of connection to interceptor */
  private static Map<Connection, InterceptorImpl> interceptorImplMap =
      new IdentityHashMap<Connection, InterceptorImpl>();

  /** The connection properties */
  private Properties properties;

  /** The connection being intercepted */
  private Connection connection;

  /** The session factory for this connection */
  SessionFactory sessionFactory;

  /** The current session (null if no session) */
  private SessionSPI session;

  /** The statement interceptor (only used during initialization) */
  private StatementInterceptor statementInterceptor;

  /** The connection lifecycle interceptor (only used during initialization) */
  private ConnectionLifecycleInterceptor connectionLifecycleInterceptor;

  /** The interceptor is ready (both interceptors are registered) */
  private boolean ready = false;

  private boolean autocommit;

  private static String LOTSOBLANKS =
      "                                                                          ";

  /**
   * Create the interceptor.
   *
   * @param connection the connection being intercepted
   * @param properties the connection properties
   */
  public InterceptorImpl(Connection connection, Properties properties) {
    if (logger.isDebugEnabled()) logger.debug("constructed with properties: " + properties);
    this.properties = properties;
    this.connection = connection;
    // if database name is not specified, translate DBNAME to the required clusterj property
    String dbname =
        properties.getProperty("com.mysql.clusterj.database", properties.getProperty("DBNAME"));
    properties.put("com.mysql.clusterj.database", dbname);
  }

  /**
   * Return the interceptor for the connection lifecycle callbacks.
   *
   * @param connectionLifecycleInterceptor the connection lifecycle interceptor
   * @param connection the connection
   * @param properties the connection properties
   * @return the interceptor delegate
   */
  public static InterceptorImpl getInterceptorImpl(
      ConnectionLifecycleInterceptor connectionLifecycleInterceptor,
      Connection connection,
      Properties properties) {
    InterceptorImpl result = getInterceptorImpl(connection, properties);
    if (result.connectionLifecycleInterceptor != null) {
      if (result.connectionLifecycleInterceptor != connectionLifecycleInterceptor) {
        throw new ClusterJUserException(
            local.message("ERR_Duplicate_Connection_Lifecycle_Interceptor"));
      }
    } else {
      result.connectionLifecycleInterceptor = connectionLifecycleInterceptor;
    }
    if (result.statementInterceptor != null) {
      result.ready = true;
    }
    return result;
  }

  /**
   * Return the interceptor for the statement interceptor callbacks.
   *
   * @param statementInterceptor the statement interceptor
   * @param connection the connection
   * @param properties the connection properties
   * @return the interceptor delegate
   */
  public static InterceptorImpl getInterceptorImpl(
      StatementInterceptor statementInterceptor, Connection connection, Properties properties) {
    InterceptorImpl result = getInterceptorImpl(connection, properties);
    if (result.statementInterceptor != null) {
      throw new ClusterJUserException(local.message("ERR_Duplicate_Statement_Interceptor"));
    }
    result.statementInterceptor = statementInterceptor;
    if (result.connectionLifecycleInterceptor != null) {
      result.ready = true;
    }
    return result;
  }

  /**
   * Create the interceptor to handle both connection lifecycle and statement interceptors.
   *
   * @param connection the connection
   * @param properties the connection properties
   * @return the interceptor for this connection, creating it if it does not already exist
   */
  public static InterceptorImpl getInterceptorImpl(Connection connection, Properties properties) {
    InterceptorImpl result;
    synchronized (interceptorImplMap) {
      result = interceptorImplMap.get(connection);
      if (result == null) {
        result = new InterceptorImpl(connection, properties);
        interceptorImplMap.put(connection, result);
      }
    }
    return result;
  }

  /**
   * Return the interceptor assigned to the connection. If there is no interceptor, return null.
   *
   * @param connection the connection
   * @return the interceptor for this connection or null if there is no interceptor
   */
  public static InterceptorImpl getInterceptorImpl(java.sql.Connection connection) {
    synchronized (interceptorImplMap) {
      return interceptorImplMap.get(connection);
    }
  }

  @Override
  public String toString() {
    return "InterceptorImpl "
    //        + " properties: "+ properties.toString()
    ;
  }

  void destroy() {
    if (sessionFactory != null) {
      if (session != null) {
        session.close();
      }
      sessionFactory.close();
      sessionFactory = null;
      synchronized (interceptorImplMap) {
        interceptorImplMap.remove(connection);
      }
    }
  }

  public SessionSPI getSession() {
    if (session == null) {
      session = (SessionSPI) sessionFactory.getSession();
    }
    return session;
  }

  public boolean executeTopLevelOnly() {
    //        assertReady();
    boolean result = true;
    return result;
  }

  public ResultSetInternalMethods postProcess(
      String sql,
      Statement statement,
      ResultSetInternalMethods result,
      Connection connection,
      int arg4,
      boolean arg5,
      boolean arg6,
      SQLException sqlException)
      throws SQLException {
    assertReady();
    return null;
  }

  public ResultSetInternalMethods preProcess(String sql, Statement statement, Connection connection)
      throws SQLException {
    assertReady();
    if (logger.isDebugEnabled() && statement != null)
      logger.debug(statement.getClass().getName() + ": " + sql);
    if (statement instanceof com.mysql.jdbc.PreparedStatement) {
      com.mysql.jdbc.PreparedStatement preparedStatement =
          (com.mysql.jdbc.PreparedStatement) statement;
      // key must be interned because we are using IdentityHashMap
      // TODO: in case of DELETE, the SQL has already been rewritten at this point,
      // and the original SQL is gone
      // so the key in the table is the rewritten DELETE SQL -- not what we want at all
      String nonRewrittenSql = preparedStatement.getNonRewrittenSql();
      String internedSql = nonRewrittenSql.intern();

      // see if we have a parsed version of this query
      Executor sQLExecutor = null;
      synchronized (parsedSqlMap) {
        sQLExecutor = parsedSqlMap.get(internedSql);
      }
      // if no cached SQLExecutor, create it, which might take some time
      if (sQLExecutor == null) {
        sQLExecutor = createSQLExecutor(internedSql);
        if (sQLExecutor != null) {
          // multiple threads might have created a SQLExecutor, but that's OK
          synchronized (parsedSqlMap) {
            parsedSqlMap.put(internedSql, sQLExecutor);
          }
        }
      }
      try {
        return sQLExecutor.execute(this, preparedStatement);
      } catch (Throwable t) {
        t.printStackTrace();
        return null;
      }
    } else {
      if (logger.isDebugEnabled() && statement != null)
        logger.debug(
            statement.getClass().getName() + " is not instanceof com.mysql.jdbc.PreparedStatement");
      // not a prepared statement; won't execute this
      return null;
    }
  }

  /** @param preparedSql */
  private Executor createSQLExecutor(String preparedSql) {
    if (logger.isDetailEnabled()) logger.detail(preparedSql);
    Executor result = null;
    // parse the sql
    CommonTree root = parse(preparedSql);
    // get the root of the tree
    int tokenType = root.getType();
    // perform command-specific actions
    String tableName = "";
    CommonTree tableNode;
    WhereNode whereNode;
    String whereType = "empty";
    List<String> columnNames = new ArrayList<String>();
    Dictionary dictionary;
    DomainTypeHandlerImpl<?> domainTypeHandler;
    QueryDomainTypeImpl<?> queryDomainType = null;
    int numberOfParameters = 0;
    switch (tokenType) {
      case MySQL51Parser.INSERT:
        tableNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.TABLE);
        tableName = getTableName(tableNode);
        getSession();
        dictionary = session.getDictionary();
        domainTypeHandler = getDomainTypeHandler(tableName, dictionary);
        CommonTree insertValuesNode =
            (CommonTree) root.getFirstChildWithType(MySQL51Parser.INSERT_VALUES);
        CommonTree columnsNode =
            (CommonTree) insertValuesNode.getFirstChildWithType(MySQL51Parser.COLUMNS);
        List<CommonTree> fields = columnsNode.getChildren();
        for (CommonTree field : fields) {
          columnNames.add(getColumnName(field));
        }
        if (logger.isDetailEnabled())
          logger.detail(
              "StatementInterceptorImpl.preProcess parse result INSERT INTO "
                  + tableName
                  + " COLUMNS "
                  + columnNames);
        result = new SQLExecutor.Insert(domainTypeHandler, columnNames);
        break;
      case MySQL51Parser.SELECT:
        CommonTree fromNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.FROM);
        if (fromNode == null) {
          // no from clause; cannot handle this case so return a do-nothing ParsedSQL
          result = new SQLExecutor.Noop();
          break;
        }
        try {
          // this currently handles only FROM clauses with a single table
          tableNode = (CommonTree) fromNode.getFirstChildWithType(MySQL51Parser.TABLE);
          tableName = getTableName(tableNode);
        } catch (Exception e) {
          // trouble with the FROM clause; log the SQL statement and the parser output
          logger.info("Problem with FROM clause in SQL statement: " + preparedSql);
          logger.info(walk(root));
          result = new SQLExecutor.Noop();
          break;
        }
        boolean forUpdate = null != (CommonTree) root.getFirstChildWithType(MySQL51Parser.FOR);
        boolean lockShared = null != (CommonTree) root.getFirstChildWithType(MySQL51Parser.LOCK);
        LockMode lockMode = LockMode.READ_COMMITTED;
        if (forUpdate) {
          lockMode = LockMode.EXCLUSIVE;
        } else if (lockShared) {
          lockMode = LockMode.SHARED;
        }
        getSession();
        dictionary = session.getDictionary();
        domainTypeHandler = getDomainTypeHandler(tableName, dictionary);
        columnsNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.COLUMNS);
        List<CommonTree> selectExprNodes = columnsNode.getChildren();
        for (CommonTree selectExprNode : selectExprNodes) {
          columnNames.add(getColumnName(getFieldNode(selectExprNode)));
        }
        if (logger.isDetailEnabled())
          logger.detail("SELECT FROM " + tableName + " COLUMNS " + columnNames);
        // we need to distinguish three cases:
        // - no where clause (select all rows)
        // - where clause that cannot be executed by clusterj
        // - where clause that can be executed by clusterj
        whereNode = ((SelectNode) root).getWhereNode();
        queryDomainType = (QueryDomainTypeImpl<?>) session.createQueryDomainType(domainTypeHandler);
        if (whereNode == null) {
          // no where clause (select all rows)
          result =
              new SQLExecutor.Select(domainTypeHandler, columnNames, queryDomainType, lockMode);
        } else {
          // create a predicate from the tree
          Predicate predicate = whereNode.getPredicate(queryDomainType);
          if (predicate != null) {
            // where clause that can be executed by clusterj
            queryDomainType.where(predicate);
            numberOfParameters = whereNode.getNumberOfParameters();
            result =
                new SQLExecutor.Select(
                    domainTypeHandler, columnNames, queryDomainType, lockMode, numberOfParameters);
            whereType = "clusterj";
          } else {
            // where clause that cannot be executed by clusterj
            result = new SQLExecutor.Noop();
            whereType = "non-clusterj";
          }
          if (logger.isDetailEnabled()) logger.detail(walk(root));
        }
        if (logger.isDetailEnabled()) {
          logger.detail(
              "SELECT FROM " + tableName + " COLUMNS " + columnNames + " whereType " + whereType);
          logger.detail(walk(root));
        }
        logger.info(preparedSql + ": " + whereType);
        break;
      case MySQL51Parser.DELETE:
        tableNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.TABLE);
        tableName = getTableName(tableNode);
        getSession();
        dictionary = session.getDictionary();
        domainTypeHandler = getDomainTypeHandler(tableName, dictionary);
        whereNode = ((WhereNode) root.getFirstChildWithType(MySQL51Parser.WHERE));
        if (whereNode == null) {
          // no where clause (delete all rows)
          result = new SQLExecutor.Delete(domainTypeHandler);
          whereType = "empty";
        } else {
          // create a predicate from the tree
          queryDomainType =
              (QueryDomainTypeImpl<?>) session.createQueryDomainType(domainTypeHandler);
          Predicate predicate = whereNode.getPredicate(queryDomainType);
          if (predicate != null) {
            // where clause that can be executed by clusterj
            queryDomainType.where(predicate);
            numberOfParameters = whereNode.getNumberOfParameters();
            result = new SQLExecutor.Delete(domainTypeHandler, queryDomainType, numberOfParameters);
            whereType = "clusterj";
          } else {
            // where clause that cannot be executed by clusterj
            result = new SQLExecutor.Noop();
            whereType = "non-clusterj";
          }
          if (logger.isDetailEnabled()) logger.detail(walk(root));
        }
        if (logger.isDetailEnabled())
          logger.detail(
              "DELETE FROM "
                  + tableName
                  + " whereType "
                  + whereType
                  + " number of parameters "
                  + numberOfParameters);
        logger.info(preparedSql + ": " + whereType);
        break;
      case MySQL51Parser.UPDATE:
        // UPDATE table SET column = value, column = value WHERE where-clause
        tableNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.TABLE);
        tableName = getTableName(tableNode);
        getSession();
        dictionary = session.getDictionary();
        domainTypeHandler = getDomainTypeHandler(tableName, dictionary);
        CommonTree setNode = (CommonTree) root.getFirstChildWithType(MySQL51Parser.SET);
        // create list of columns to update
        // SET node has one child for each <field> = <value>
        List<CommonTree> equalNodes = setNode.getChildren();
        List<Integer> parameterNumbers = new ArrayList<Integer>();
        for (CommonTree equalNode : equalNodes) {
          // each equalNode has a FIELD node and a parameter node
          columnNames.add(getColumnName(getFieldNode(equalNode)));
          PlaceholderNode parameterNode = (PlaceholderNode) equalNode.getChild(1);
          parameterNumbers.add(parameterNode.getId());
        }
        if (logger.isDetailEnabled())
          logger.detail("Update " + columnNames + " values " + parameterNumbers);
        whereNode = ((WhereNode) root.getFirstChildWithType(MySQL51Parser.WHERE));
        if (whereNode == null) {
          // no where clause (update all rows)
          whereType = "non-clusterj";
          // return a do-nothing ParsedSQL
          if (logger.isDetailEnabled())
            logger.detail(
                "ClusterJ cannot process this SQL statement: "
                    + "unsupported statement type (UPDATE without WHERE clause.");
          result = new SQLExecutor.Noop();
        } else {
          // create a predicate from the tree
          queryDomainType =
              (QueryDomainTypeImpl<?>) session.createQueryDomainType(domainTypeHandler);
          PredicateImpl predicate = (PredicateImpl) whereNode.getPredicate(queryDomainType);
          if (predicate != null) {
            // where clause that can be executed by clusterj
            queryDomainType.where(predicate);
            List<String> whereColumnNames = predicate.getTopLevelPropertyNames();
            numberOfParameters = equalNodes.size() + whereColumnNames.size();
            result =
                new SQLExecutor.Update(
                    domainTypeHandler,
                    queryDomainType,
                    numberOfParameters,
                    columnNames,
                    whereColumnNames);
            whereType = "clusterj";
          } else {
            // where clause that cannot be executed by clusterj
            result = new SQLExecutor.Noop();
            whereType = "non-clusterj";
          }
          if (logger.isDetailEnabled()) logger.detail(walk(root));
        }
        if (logger.isDetailEnabled())
          logger.detail(
              "UPDATE "
                  + tableName
                  + " whereType "
                  + whereType
                  + " number of parameters "
                  + numberOfParameters);
        logger.info(preparedSql + ": " + whereType);
        break;
      default:
        // return a do-nothing ParsedSQL
        if (logger.isDetailEnabled())
          logger.detail("ClusterJ cannot process this SQL statement: unsupported statement type.");
        result = new SQLExecutor.Noop();
    }
    return result;
  }

  private String getPrimaryKeyFieldName(CommonTree whereNode) {
    String result = null;
    CommonTree operation = (CommonTree) whereNode.getChild(0);
    if (MySQL51Parser.EQUALS == operation.getType()) {
      result = operation.getChild(0).getChild(0).getText();
    } else {
      throw new ClusterJUserException("Cannot find primary key in WHERE clause.");
    }
    return result;
  }

  private String walk(CommonTree tree) {
    StringBuilder buffer = new StringBuilder();
    walk(tree, buffer, 0);
    return buffer.toString();
  }

  @SuppressWarnings("unchecked") // tree.getChildren()
  private void walk(CommonTree tree, StringBuilder buffer, int level) {
    String indent = LOTSOBLANKS.substring(0, level);
    Token token = tree.token;
    int tokenType = token.getType();
    String tokenText = token.getText();
    int childCount = tree.getChildCount();
    int childIndex = tree.getChildIndex();
    buffer.append('\n');
    buffer.append(indent);
    buffer.append(tokenText);
    buffer.append(" class: ");
    buffer.append(tree.getClass().getName());
    buffer.append(" tokenType ");
    buffer.append(tokenType);
    buffer.append(" child count ");
    buffer.append(childCount);
    buffer.append(" child index ");
    buffer.append(childIndex);
    List<CommonTree> children = tree.getChildren();
    if (children == null) {
      return;
    }
    for (CommonTree child : children) {
      walk(child, buffer, level + 2);
    }
  }

  private CommonTree parse(String preparedSql) {
    CommonTree result = null;
    ANTLRNoCaseStringStream inputStream = new ANTLRNoCaseStringStream(preparedSql);
    MySQL51Lexer lexer = new MySQL51Lexer(inputStream);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    lexer.setErrorListener(new QueuingErrorListener(lexer));
    tokens.getTokens();
    if (lexer.getErrorListener().hasErrors()) {
      logger.warn(local.message("ERR_Lexing_SQL", preparedSql));
      return result;
    }
    PlaceholderNode.resetId();
    MySQL51Parser parser = new MySQL51Parser(tokens);
    parser.setTreeAdaptor(mySQLTreeAdaptor);
    parser.setErrorListener(new QueuingErrorListener(parser));
    try {
      CommonTree stmtTree = (CommonTree) parser.statement().getTree();
      result = stmtTree;
    } catch (RecognitionException e) {
      logger.warn(local.message("ERR_Parsing_SQL", preparedSql));
    }
    if (parser.getErrorListener().hasErrors()) {
      logger.warn(local.message("ERR_Parsing_SQL", preparedSql));
    }
    return result;
  }

  private TreeAdaptor mySQLTreeAdaptor =
      new CommonTreeAdaptor() {
        public Object create(Token token) {
          return new Node(token);
        }

        public Object dupNode(Object t) {
          if (t == null) return null;
          return create(((Node) t).token);
        }
      };

  private String getTableName(CommonTree tableNode) {
    return tableNode.getChild(0).getText();
  }

  private String getColumnName(CommonTree fieldNode) {
    return fieldNode.getChild(0).getText();
  }

  private CommonTree getFieldNode(CommonTree selectExprNode) {
    return (CommonTree) selectExprNode.getChild(0);
  }

  public void destroy(StatementInterceptor statementInterceptor) {}

  public void destroy(ConnectionLifecycleInterceptor connectionLifecycleInterceptor) {}

  private void assertReady() {
    if (!ready) {
      if (statementInterceptor == null) {
        throw new ClusterJUserException(local.message("ERR_No_Statement_Interceptor"));
      }
      if (connectionLifecycleInterceptor == null) {
        throw new ClusterJUserException(local.message("ERR_No_Connection_Lifecycle_Interceptor"));
      }
    } else {
      if (sessionFactory == null) {
        sessionFactory = ClusterJHelper.getSessionFactory(properties);
      }
    }
  }

  /** TODO This needs to be rewritten with a proper state machine. */
  public boolean setAutoCommit(boolean autocommit) throws SQLException {
    assertReady();
    logStatus("setAutoCommit(" + autocommit + ")");
    this.autocommit = autocommit;
    getSession();
    if (!autocommit) {
      // start a transaction
      if (!session.currentTransaction().isActive()) {
        session.begin();
      }
    } else {
      // roll back the previous transaction if active
      if (session.currentTransaction().isActive()) {
        session.rollback();
      }
    }
    return true; // let the driver perform its own autocommit behavior
  }

  public void close() {}

  public boolean commit() throws SQLException {
    logStatus("commit");
    if (session.currentTransaction().isActive()) {
      session.commit();
    } else {
      System.out.println("WARNING: commit called when session.transaction is not active");
    }
    session.begin();
    return true;
  }

  public boolean rollback() throws SQLException {
    logStatus("rollback");
    session.rollback();
    session.begin();
    return true;
  }

  public boolean rollback(Savepoint savepoint) throws SQLException {
    logStatus("rollback(Savepoint)");
    return true;
  }

  public boolean setCatalog(String catalog) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("catalog: " + catalog);
    return true;
  }

  public boolean transactionCompleted() throws SQLException {
    logStatus("transactionCompleted");
    return true;
  }

  public boolean transactionBegun() throws SQLException {
    logStatus("transactionBegun");
    return true;
  }

  private DomainTypeHandlerImpl<?> getDomainTypeHandler(String tableName, Dictionary dictionary) {
    DomainTypeHandlerImpl<?> domainTypeHandler =
        DomainTypeHandlerImpl.getDomainTypeHandler(tableName, dictionary);
    return domainTypeHandler;
  }

  private void logStatus(String s) throws SQLException {
    if (logger.isDetailEnabled()) {
      StringBuilder builder = new StringBuilder("In ");
      builder.append(s);
      builder.append(" with");
      if (connection != null) {
        builder.append(" connection.getAutocommit: " + connection.getAutoCommit());
      }
      if (session != null) {
        builder.append(" session.isActive: " + session.currentTransaction().isActive());
      }
      builder.append('\n');
      String message = builder.toString();
      logger.detail(message);
    }
  }
}
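To make the two-step registration above concrete, here is a hedged sketch (the method name registerBoth is hypothetical, and the interceptor, connection, and properties arguments are assumed to be supplied by the JDBC driver during connection setup): the per-connection delegate only becomes ready once both the statement interceptor and the connection lifecycle interceptor have registered.

  // Sketch only: both calls return the same per-connection InterceptorImpl instance.
  static InterceptorImpl registerBoth(
      StatementInterceptor statementInterceptor,
      ConnectionLifecycleInterceptor connectionLifecycleInterceptor,
      Connection connection,
      Properties properties) {
    InterceptorImpl delegate =
        InterceptorImpl.getInterceptorImpl(statementInterceptor, connection, properties);
    InterceptorImpl same =
        InterceptorImpl.getInterceptorImpl(connectionLifecycleInterceptor, connection, properties);
    assert delegate == same;  // after both registrations the shared instance is marked ready
    return delegate;
  }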
Code example #27
File: ResultDataImpl.java  Project: carrotli/ansql
class ResultDataImpl implements ResultData {

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(ResultDataImpl.class);

  /** My logger */
  static final Logger logger = LoggerFactoryService.getFactory().getInstance(ResultDataImpl.class);

  /** Flags for iterating a scan */
  protected final int RESULT_READY = 0;

  protected final int SCAN_FINISHED = 1;
  protected final int CACHE_EMPTY = 2;

  /** The NdbOperation that defines the result */
  private NdbOperation ndbOperation = null;

  /** The NdbRecAttrs that specify the columns to retrieve */
  private NdbRecAttr[] ndbRecAttrs = null;

  /** The flag indicating that there are no more results */
  private boolean nextDone;

  /** The ByteBuffer containing the results, possibly obtained from buffer manager */
  private ByteBuffer byteBuffer = null;

  /** Offsets into the ByteBuffer containing the results */
  private int[] offsets = null;

  /** Lengths of the fields in the ByteBuffer containing the results */
  private int[] lengths = null;

  /** The Columns in this result */
  private final Column[] storeColumns;

  /** The buffer manager */
  private BufferManager bufferManager;

  /** The cluster connection */
  private ClusterConnectionImpl clusterConnection;

  /**
   * Construct the ResultDataImpl based on an NdbOperation, a list of columns to include in the
   * result, and the pre-computed buffer layout for the result.
   *
   * @param ndbOperation the NdbOperation
   * @param storeColumns the columns in the result
   * @param maximumColumnId the largest column id
   * @param bufferSize the size of the buffer needed
   * @param offsets the array of offsets indexed by column id
   * @param lengths the array of lengths indexed by column id
   * @param bufferManager the buffer manager
   * @param allocateNew true to allocate a new (unshared) result buffer
   */
  public ResultDataImpl(
      NdbOperation ndbOperation,
      List<Column> storeColumns,
      int maximumColumnId,
      int bufferSize,
      int[] offsets,
      int[] lengths,
      BufferManager bufferManager,
      boolean allocateNew) {
    this.ndbOperation = ndbOperation;
    this.bufferManager = bufferManager;
    // save the column list
    this.storeColumns = storeColumns.toArray(new Column[storeColumns.size()]);
    this.offsets = offsets;
    this.lengths = lengths;
    if (allocateNew) {
      byteBuffer = ByteBuffer.allocateDirect(bufferSize);
    } else {
      byteBuffer = bufferManager.getResultDataBuffer(bufferSize);
    }
    byteBuffer.order(ByteOrder.nativeOrder());
    // iterate the list of store columns and allocate an NdbRecAttr (via getValue) for each
    ndbRecAttrs = new NdbRecAttr[maximumColumnId + 1];
    for (Column storeColumn : storeColumns) {
      NdbRecAttr ndbRecAttr = null;
      int columnId = storeColumn.getColumnId();
      byteBuffer.position(offsets[columnId]);
      if (lengths[columnId] == 0) {
        // TODO: to help profiling
        ndbRecAttr = ndbOperation.getValue(columnId, null);
        //                ndbRecAttr = getValue(ndbOperation, columnId, null);
      } else {
        ndbRecAttr = ndbOperation.getValue(columnId, byteBuffer);
        //                ndbRecAttr = getValue(ndbOperation, columnId, byteBuffer);
      }
      handleError(ndbRecAttr, ndbOperation);
      ndbRecAttrs[columnId] = ndbRecAttr;
    }
  }

  public boolean next() {
    // NdbOperation has exactly zero or one result. ScanResultDataImpl handles scans...
    NdbErrorConst error = ndbOperation.getNdbError();
    // if the ndbOperation reports an error there is no result
    int errorCode = error.code();
    if (errorCode != 0) {
      setNoResult();
    }
    if (nextDone) {
      return false;
    } else {
      nextDone = true;
      return true;
    }
  }

  public Blob getBlob(int column) {
    return getBlob(storeColumns[column]);
  }

  public Blob getBlob(Column storeColumn) {
    NdbBlob ndbBlob = ndbOperation.getBlobHandle(storeColumn.getColumnId());
    handleError(ndbBlob, ndbOperation);
    return new BlobImpl(ndbBlob, clusterConnection.getByteBufferPool());
  }

  public boolean getBoolean(int column) {
    return getBoolean(storeColumns[column]);
  }

  public boolean getBoolean(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return Utility.getBoolean(storeColumn, ndbRecAttr);
  }

  public boolean[] getBooleans(int column) {
    return getBooleans(storeColumns[column]);
  }

  public boolean[] getBooleans(Column storeColumn) {
    throw new ClusterJFatalInternalException(local.message("ERR_Not_Implemented"));
  }

  public byte getByte(int column) {
    return getByte(storeColumns[column]);
  }

  public byte getByte(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return Utility.getByte(storeColumn, ndbRecAttr);
  }

  public short getShort(int column) {
    return getShort(storeColumns[column]);
  }

  public short getShort(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return Utility.getShort(storeColumn, ndbRecAttr);
  }

  public int getInt(int column) {
    return getInt(storeColumns[column]);
  }

  public int getInt(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return Utility.getInt(storeColumn, ndbRecAttr);
  }

  public long getLong(int column) {
    return getLong(storeColumns[column]);
  }

  public long getLong(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return Utility.getLong(storeColumn, ndbRecAttr.int64_value());
  }

  public float getFloat(int column) {
    return getFloat(storeColumns[column]);
  }

  public float getFloat(Column storeColumn) {
    int index = storeColumn.getColumnId();
    float result = ndbRecAttrs[index].float_value();
    return result;
  }

  public double getDouble(int column) {
    return getDouble(storeColumns[column]);
  }

  public double getDouble(Column storeColumn) {
    int index = storeColumn.getColumnId();
    double result = ndbRecAttrs[index].double_value();
    return result;
  }

  public String getString(int column) {
    return getString(storeColumns[column]);
  }

  public String getString(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    if (ndbRecAttr.isNULL() == 1) return null;
    int prefixLength = storeColumn.getPrefixLength();
    int actualLength;
    int offset = offsets[index];
    byteBuffer.limit(byteBuffer.capacity());
    switch (prefixLength) {
      case 0:
        actualLength = lengths[index];
        break;
      case 1:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        offset += 1;
        break;
      case 2:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
        actualLength += 256 * length2;
        offset += 2;
        break;
      default:
        throw new ClusterJFatalInternalException(
            local.message("ERR_Invalid_Prefix_Length", prefixLength));
    }

    byteBuffer.position(offset);
    byteBuffer.limit(offset + actualLength);

    String result = Utility.decode(byteBuffer, storeColumn.getCharsetNumber(), bufferManager);
    byteBuffer.clear();
    return result;
  }

  public byte[] getBytes(int column) {
    return getBytes(storeColumns[column]);
  }

  public byte[] getBytes(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    if (ndbRecAttr.isNULL() == 1) return null;
    int prefixLength = storeColumn.getPrefixLength();
    int actualLength = lengths[index];
    int offset = offsets[index];
    switch (prefixLength) {
      case 0:
        break;
      case 1:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        offset += 1;
        break;
      case 2:
        actualLength = (byteBuffer.get(offset) + 256) % 256;
        int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
        actualLength += 256 * length2;
        offset += 2;
        break;
      default:
        throw new ClusterJFatalInternalException(
            local.message("ERR_Invalid_Prefix_Length", prefixLength));
    }
    byteBuffer.position(offset);
    byte[] result = new byte[actualLength];
    byteBuffer.get(result);
    return result;
  }

  public Object getObject(int column) {
    return getObject(storeColumns[column]);
  }

  public Object getObject(Column storeColumn) {
    throw new ClusterJFatalInternalException(local.message("ERR_Implementation_Should_Not_Occur"));
  }

  public boolean wasNull(Column storeColumn) {
    throw new ClusterJFatalInternalException(local.message("ERR_Implementation_Should_Not_Occur"));
  }

  public Boolean getObjectBoolean(int column) {
    return getObjectBoolean(storeColumns[column]);
  }

  public Boolean getObjectBoolean(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    if (ndbRecAttr.isNULL() == 1) {
      return null;
    } else {
      byte value = ndbRecAttr.int8_value();
      Boolean result = (Boolean.valueOf((value & 0x01) == 0x01));
      return result;
    }
  }

  public Byte getObjectByte(int column) {
    return getObjectByte(storeColumns[column]);
  }

  public Byte getObjectByte(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1) ? null : Utility.getByte(storeColumn, ndbRecAttr);
  }

  public Short getObjectShort(int column) {
    return getObjectShort(storeColumns[column]);
  }

  public Short getObjectShort(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1) ? null : Utility.getShort(storeColumn, ndbRecAttr);
  }

  public Integer getObjectInteger(int column) {
    return getObjectInteger(storeColumns[column]);
  }

  public Integer getObjectInteger(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1) ? null : Utility.getInt(storeColumn, ndbRecAttr);
  }

  public Long getObjectLong(int column) {
    return getObjectLong(storeColumns[column]);
  }

  public Long getObjectLong(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1)
        ? null
        : Utility.getLong(storeColumn, ndbRecAttr.int64_value());
  }

  public Float getObjectFloat(int column) {
    return getObjectFloat(storeColumns[column]);
  }

  public Float getObjectFloat(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1) ? null : getFloat(storeColumn);
  }

  public Double getObjectDouble(int column) {
    return getObjectDouble(storeColumns[column]);
  }

  public Double getObjectDouble(Column storeColumn) {
    int index = storeColumn.getColumnId();
    NdbRecAttr ndbRecAttr = ndbRecAttrs[index];
    return (ndbRecAttr.isNULL() == 1) ? null : getDouble(storeColumn);
  }

  public BigInteger getBigInteger(int column) {
    return getBigInteger(storeColumns[column]);
  }

  public BigInteger getBigInteger(Column storeColumn) {
    int index = storeColumn.getColumnId();
    int offset = offsets[index];
    int precision = storeColumn.getPrecision();
    int scale = storeColumn.getScale();
    int length = Utility.getDecimalColumnSpace(precision, scale);
    byteBuffer.position(offset);
    return Utility.getBigInteger(byteBuffer, length, precision, scale);
  }

  public BigDecimal getDecimal(int column) {
    return getDecimal(storeColumns[column]);
  }

  public BigDecimal getDecimal(Column storeColumn) {
    int index = storeColumn.getColumnId();
    int offset = offsets[index];
    int precision = storeColumn.getPrecision();
    int scale = storeColumn.getScale();
    int length = Utility.getDecimalColumnSpace(precision, scale);
    byteBuffer.position(offset);
    return Utility.getDecimal(byteBuffer, length, precision, scale);
  }

  private void handleError(Object object, NdbOperation ndbOperation) {
    if (object == null) {
      Utility.throwError(object, ndbOperation.getNdbError());
    }
  }

  public void setNoResult() {
    nextDone = true;
  }

  public Column[] getColumns() {
    return storeColumns;
  }
}
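As a brief usage illustration of the result accessors above (a sketch, assuming resultData is an already-executed ResultDataImpl and idColumn/nameColumn are the matching store Column metadata objects; the method name readRow is hypothetical):

  // Sketch only: reading a single-row (non-scan) result.
  static String readRow(ResultDataImpl resultData, Column idColumn, Column nameColumn) {
    if (!resultData.next()) {
      return null;                                  // the operation reported an error or returned no row
    }
    int id = resultData.getInt(idColumn);           // primitive accessor, addressed by Column
    String name = resultData.getString(nameColumn); // decodes the length-prefixed character bytes
    return id + ":" + name;
  }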
class OperationImpl implements Operation {

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(OperationImpl.class);

  /** My logger */
  static final Logger logger = LoggerFactoryService.getFactory().getInstance(OperationImpl.class);

  private NdbOperation ndbOperation;

  private ClusterTransactionImpl clusterTransaction;

  public OperationImpl(NdbOperation operation, ClusterTransactionImpl transaction) {
    this.ndbOperation = operation;
    this.clusterTransaction = transaction;
  }

  public void equalBoolean(Column storeColumn, boolean booleanValue) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public void equalByte(Column storeColumn, byte b) {
    try {
      ndbOperation.equalInt(storeColumn.getName(), (int) b);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalBytes(Column storeColumn, byte[] value) {
    try {
      ndbOperation.equalBytes(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalDecimal(Column storeColumn, BigDecimal value) {
    try {
      ndbOperation.equalDecimal(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalInt(Column storeColumn, int value) {
    try {
      ndbOperation.equalInt(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalLong(Column storeColumn, long value) {
    try {
      ndbOperation.equalLong(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalString(Column storeColumn, String value) {
    try {
      ndbOperation.equalString(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalTimestamp(Column storeColumn, Timestamp value) {
    try {
      ndbOperation.equalTimestamp(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalDatetime(Column storeColumn, Timestamp value) {
    try {
      ndbOperation.equalDatetime(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalDate(Column storeColumn, Date value) {
    try {
      // represent the Date value as a Timestamp and compare it via equalDatetime
      Timestamp timestamp = new Timestamp(value.getTime());
      ndbOperation.equalDatetime(storeColumn.getName(), timestamp);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void equalTime(Column storeColumn, Time value) {
    try {
      // represent the Time value as a Timestamp and compare it via equalDatetime
      Timestamp timestamp = new Timestamp(value.getTime());
      ndbOperation.equalDatetime(storeColumn.getName(), timestamp);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void getBlob(Column storeColumn) {
    try {
      ndbOperation.getBlob(storeColumn.getName());
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public Blob getBlobHandle(Column storeColumn) {
    try {
      return new BlobImpl(ndbOperation.getBlobHandle(storeColumn.getName()));
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void getValue(Column storeColumn) {
    try {
      ndbOperation.getValue(storeColumn.getName());
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void postExecuteCallback(Runnable callback) {
    clusterTransaction.postExecuteCallback(callback);
  }

  public ResultData resultData() {
    // execute the transaction to get results
    clusterTransaction.executeNoCommit();
    return new ResultDataImpl(ndbOperation.resultData());
  }

  public void setBoolean(Column storeColumn, Boolean value) {
    throw new UnsupportedOperationException(local.message("ERR_NotImplemented"));
  }

  public void setByte(Column storeColumn, byte value) {
    try {
      ndbOperation.setInt(storeColumn.getName(), (int) value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setBytes(Column storeColumn, byte[] value) {
    try {
      ndbOperation.setBytes(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setDate(Column storeColumn, Date value) {
    try {
      ndbOperation.setDate(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setDecimal(Column storeColumn, BigDecimal value) {
    try {
      ndbOperation.setDecimal(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setDouble(Column storeColumn, Double value) {
    try {
      ndbOperation.setDouble(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setFloat(Column storeColumn, Float value) {
    try {
      ndbOperation.setFloat(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setInt(Column storeColumn, Integer value) {
    try {
      ndbOperation.setInt(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setLong(Column storeColumn, long value) {
    try {
      ndbOperation.setLong(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setNull(Column storeColumn) {
    try {
      ndbOperation.setNull(storeColumn.getName());
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setShort(Column storeColumn, Short value) {
    try {
      ndbOperation.setShort(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setString(Column storeColumn, String value) {
    try {
      ndbOperation.setString(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setTime(Column storeColumn, Time value) {
    try {
      ndbOperation.setTime(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setTimestamp(Column storeColumn, Timestamp value) {
    try {
      ndbOperation.setTimestamp(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }

  public void setDatetime(Column storeColumn, Timestamp value) {
    try {
      ndbOperation.setDatetime(storeColumn.getName(), value);
    } catch (NdbApiException ndbApiException) {
      throw new ClusterJDatastoreException(local.message("ERR_Datastore"), ndbApiException);
    }
  }
}
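
Every method in this version of OperationImpl repeats the same idiom: delegate to the NDB call, catch the checked NdbApiException, and rethrow it as an unchecked ClusterJDatastoreException carrying a localized message. A minimal, self-contained sketch of that translation idiom (the exception and interface names below are stand-ins, not ClusterJ types):

// Sketch of the exception-translation idiom used throughout the class above.
// DatastoreCallException / DatastoreRuntimeException are illustrative stand-ins.
public class ExceptionTranslationSketch {

  static class DatastoreCallException extends Exception {
    DatastoreCallException(String message) { super(message); }
  }

  static class DatastoreRuntimeException extends RuntimeException {
    DatastoreRuntimeException(String message, Throwable cause) { super(message, cause); }
  }

  interface DatastoreCall<T> {
    T call() throws DatastoreCallException;
  }

  // Wrap a checked datastore call so callers only ever see an unchecked exception,
  // mirroring how each method above wraps NdbApiException in ClusterJDatastoreException.
  static <T> T translate(DatastoreCall<T> call) {
    try {
      return call.call();
    } catch (DatastoreCallException e) {
      throw new DatastoreRuntimeException("ERR_Datastore", e);
    }
  }

  public static void main(String[] args) {
    System.out.println(translate(() -> "ok")); // normal path
    try {
      translate(() -> { throw new DatastoreCallException("boom"); });
    } catch (DatastoreRuntimeException expected) {
      System.out.println("translated: " + expected.getCause().getMessage());
    }
  }
}
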
Code example #30
0
class OperationImpl implements Operation {

  /** My message translator */
  static final I18NHelper local = I18NHelper.getInstance(OperationImpl.class);

  /** My logger */
  static final Logger logger = LoggerFactoryService.getFactory().getInstance(OperationImpl.class);

  private NdbOperation ndbOperation;

  protected List<Column> storeColumns = new ArrayList<Column>();

  protected ClusterTransactionImpl clusterTransaction;

  /** The size of the receive buffer for this operation (may be zero for non-read operations) */
  protected int bufferSize;

  /** The maximum column id for this operation (may be zero for non-read operations) */
  protected int maximumColumnId;

  /** The offsets into the buffer for each column (may be null for non-read operations) */
  protected int[] offsets;

  /** The lengths of fields in the buffer for each column (may be null for non-read operations) */
  protected int[] lengths;

  /** The maximum length of any column in this operation */
  protected int maximumColumnLength;

  protected BufferManager bufferManager;

  /**
   * Constructor used for insert and delete operations that do not need to read data.
   *
   * @param operation the operation
   * @param transaction the transaction
   */
  public OperationImpl(NdbOperation operation, ClusterTransactionImpl transaction) {
    this.ndbOperation = operation;
    this.clusterTransaction = transaction;
    this.bufferManager = clusterTransaction.getBufferManager();
  }

  /**
   * Constructor used for read operations. The table is used to obtain data used to lay out memory
   * for the result.
   *
   * @param storeTable the table
   * @param operation the operation
   * @param transaction the transaction
   */
  public OperationImpl(
      Table storeTable, NdbOperation operation, ClusterTransactionImpl transaction) {
    this(operation, transaction);
    TableImpl tableImpl = (TableImpl) storeTable;
    this.maximumColumnId = tableImpl.getMaximumColumnId();
    this.bufferSize = tableImpl.getBufferSize();
    this.offsets = tableImpl.getOffsets();
    this.lengths = tableImpl.getLengths();
    this.maximumColumnLength = tableImpl.getMaximumColumnLength();
  }

  public void equalBigInteger(Column storeColumn, BigInteger value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void equalBoolean(Column storeColumn, boolean booleanValue) {
    byte value = (booleanValue ? (byte) 0x01 : (byte) 0x00);
    int returnCode = ndbOperation.equal(storeColumn.getName(), value);
    handleError(returnCode, ndbOperation);
  }

  public void equalByte(Column storeColumn, byte value) {
    int storeValue = Utility.convertByteValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void equalBytes(Column storeColumn, byte[] value) {
    if (logger.isDetailEnabled())
      logger.detail(
          "Column: "
              + storeColumn.getName()
              + " columnId: "
              + storeColumn.getColumnId()
              + " data length: "
              + value.length);
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void equalDecimal(Column storeColumn, BigDecimal value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void equalDouble(Column storeColumn, double value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void equalFloat(Column storeColumn, float value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void equalInt(Column storeColumn, int value) {
    int returnCode = ndbOperation.equal(storeColumn.getName(), value);
    handleError(returnCode, ndbOperation);
  }

  public void equalShort(Column storeColumn, short value) {
    int storeValue = Utility.convertShortValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void equalLong(Column storeColumn, long value) {
    long storeValue = Utility.convertLongValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.equal(storeColumn.getName(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void equalString(Column storeColumn, String value) {
    ByteBuffer stringStorageBuffer = Utility.encode(value, storeColumn, bufferManager);
    int returnCode = ndbOperation.equal(storeColumn.getName(), stringStorageBuffer);
    bufferManager.clearStringStorageBuffer();
    handleError(returnCode, ndbOperation);
  }

  public void getBlob(Column storeColumn) {
    NdbBlob ndbBlob = ndbOperation.getBlobHandleM(storeColumn.getColumnId());
    handleError(ndbBlob, ndbOperation);
  }

  public Blob getBlobHandle(Column storeColumn) {
    NdbBlob blobHandle = ndbOperation.getBlobHandleM(storeColumn.getColumnId());
    handleError(blobHandle, ndbOperation);
    return new BlobImpl(blobHandle);
  }

  /**
   * Specify the columns to be used for the operation. For now, just save the columns. When
   * resultData is called, pass the columns to the ResultData constructor and then execute the
   * operation.
   */
  public void getValue(Column storeColumn) {
    storeColumns.add(storeColumn);
  }

  public void postExecuteCallback(Runnable callback) {
    clusterTransaction.postExecuteCallback(callback);
  }

  /** Construct a new ResultData using the saved column data and then execute the operation. */
  public ResultData resultData() {
    return resultData(true);
  }

  /** Construct a new ResultData and if requested, execute the operation. */
  public ResultData resultData(boolean execute) {
    if (logger.isDetailEnabled())
      logger.detail("storeColumns: " + Arrays.toString(storeColumns.toArray()));
    ResultDataImpl result;
    if (execute) {
      result =
          new ResultDataImpl(
              ndbOperation,
              storeColumns,
              maximumColumnId,
              bufferSize,
              offsets,
              lengths,
              bufferManager,
              false);
      clusterTransaction.executeNoCommit(false, true);
    } else {
      result =
          new ResultDataImpl(
              ndbOperation,
              storeColumns,
              maximumColumnId,
              bufferSize,
              offsets,
              lengths,
              bufferManager,
              true);
    }
    return result;
  }

  public void setBigInteger(Column storeColumn, BigInteger value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void setBoolean(Column storeColumn, Boolean value) {
    byte byteValue = (value ? (byte) 0x01 : (byte) 0x00);
    setByte(storeColumn, byteValue);
  }

  public void setByte(Column storeColumn, byte value) {
    int storeValue = Utility.convertByteValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void setBytes(Column storeColumn, byte[] value) {
    // TODO use the string storage buffer instead of allocating a new buffer for each value
    int length = value.length;
    if (length > storeColumn.getLength()) {
      throw new ClusterJUserException(
          local.message(
              "ERR_Data_Too_Long", storeColumn.getName(), storeColumn.getLength(), length));
    }
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void setDecimal(Column storeColumn, BigDecimal value) {
    ByteBuffer buffer = Utility.convertValue(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), buffer);
    handleError(returnCode, ndbOperation);
  }

  public void setDouble(Column storeColumn, Double value) {
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), value);
    handleError(returnCode, ndbOperation);
  }

  public void setFloat(Column storeColumn, Float value) {
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), value);
    handleError(returnCode, ndbOperation);
  }

  public void setInt(Column storeColumn, Integer value) {
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), value);
    handleError(returnCode, ndbOperation);
  }

  public void setLong(Column storeColumn, long value) {
    long storeValue = Utility.convertLongValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void setNull(Column storeColumn) {
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), null);
    handleError(returnCode, ndbOperation);
  }

  public void setShort(Column storeColumn, Short value) {
    int storeValue = Utility.convertShortValueForStorage(storeColumn, value);
    int returnCode = ndbOperation.setValue(storeColumn.getName(), storeValue);
    handleError(returnCode, ndbOperation);
  }

  public void setString(Column storeColumn, String value) {
    ByteBuffer stringStorageBuffer = Utility.encode(value, storeColumn, bufferManager);
    int length = stringStorageBuffer.remaining() - storeColumn.getPrefixLength();
    if (length > storeColumn.getLength()) {
      throw new ClusterJUserException(
          local.message(
              "ERR_Data_Too_Long", storeColumn.getName(), storeColumn.getLength(), length));
    }
    int returnCode = ndbOperation.setValue(storeColumn.getColumnId(), stringStorageBuffer);
    bufferManager.clearStringStorageBuffer();
    handleError(returnCode, ndbOperation);
  }

  public int errorCode() {
    return ndbOperation.getNdbError().code();
  }

  protected void handleError(int returnCode, NdbOperation ndbOperation) {
    if (returnCode == 0) {
      return;
    } else {
      Utility.throwError(returnCode, ndbOperation.getNdbError());
    }
  }

  protected static void handleError(Object object, NdbOperation ndbOperation) {
    if (object != null) {
      return;
    } else {
      Utility.throwError(null, ndbOperation.getNdbError());
    }
  }

  public void beginDefinition() {
    // nothing to do
  }

  public void endDefinition() {
    // nothing to do
  }

  public int getErrorCode() {
    return ndbOperation.getNdbError().code();
  }

  public int getClassification() {
    return ndbOperation.getNdbError().classification();
  }

  public int getMysqlCode() {
    return ndbOperation.getNdbError().mysql_code();
  }

  public int getStatus() {
    return ndbOperation.getNdbError().status();
  }

  public void freeResourcesAfterExecute() {}
}
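
In this newer OperationImpl, getValue only records which columns were requested, and resultData later passes the accumulated list to ResultDataImpl and, if asked, executes the pending transaction. A minimal sketch of that collect-then-execute pattern in isolation (the map-backed "datastore" and all names below are illustrative assumptions, not ClusterJ API):

import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of the collect-then-execute pattern: callers register columns up front,
// and nothing is fetched until the result is actually requested.
public class DeferredReadSketch {

  private final List<String> requestedColumns = new ArrayList<>();
  private final Map<String, Object> row; // stands in for the datastore

  DeferredReadSketch(Map<String, Object> row) {
    this.row = row;
  }

  // Analogous to getValue(Column): just remember what to read.
  void getValue(String columnName) {
    requestedColumns.add(columnName);
  }

  // Analogous to resultData(): only now do we "execute" and materialize the result.
  Map<String, Object> resultData() {
    Map<String, Object> result = new LinkedHashMap<>();
    for (String column : requestedColumns) {
      result.put(column, row.get(column)); // pretend fetch
    }
    return Collections.unmodifiableMap(result);
  }

  public static void main(String[] args) {
    Map<String, Object> stored = Map.of("id", 1, "name", "alice", "age", 30);
    DeferredReadSketch op = new DeferredReadSketch(stored);
    op.getValue("id");
    op.getValue("name");
    System.out.println(op.resultData()); // {id=1, name=alice}
  }
}
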