Example 1
  /**
   * Also let tables with no sharding rule configured fall back to the schema's default dataNode.
   *
   * @param schemaConfig the schema configuration
   * @param tableName the table name
   * @return true if the table is not sharded and can be routed to the schema's default dataNode
   */
  public static boolean isNoSharding(SchemaConfig schemaConfig, String tableName) {
    if (schemaConfig.isNoSharding()) {
      return true;
    }
    if (schemaConfig.getDataNode() != null && !schemaConfig.getTables().containsKey(tableName)) {
      return true;
    }

    return false;
  }
Example 2
 public static boolean isAllGlobalTable(DruidShardingParseInfo ctx, SchemaConfig schema) {
   boolean isAllGlobal = false;
   for (String table : ctx.getTables()) {
     TableConfig tableConfig = schema.getTables().get(table);
     // an unknown table cannot be global; the null check also guards against a NullPointerException
     if (tableConfig != null && tableConfig.isGlobalTable()) {
       isAllGlobal = true;
     } else {
       return false;
     }
   }
   return isAllGlobal;
 }
Example 3
 /**
  * Randomly pick a data node for the given table.
  *
  * @param schema the schema (database) configuration
  * @param table the table name
  * @return a data node, or null if the table is not defined in the schema
  * @author mycat
  */
 private static String getMetaReadDataNode(SchemaConfig schema, String table) {
   // table names are stored in upper case in the schema config
   table = table.toUpperCase();
   String dataNode = null;
   Map<String, TableConfig> tables = schema.getTables();
   TableConfig tc;
   if (tables != null && (tc = tables.get(table)) != null) {
     dataNode = tc.getRandomDataNode();
   }
   return dataNode;
 }
Example 4
 public static boolean processInsert(
     SchemaConfig schema, int sqlType, String origSQL, ServerConnection sc)
     throws SQLNonTransientException {
   String tableName = StringUtil.getTableName(origSQL).toUpperCase();
   TableConfig tableConfig = schema.getTables().get(tableName);
   boolean processedInsert = false;
   if (null != tableConfig && tableConfig.isAutoIncrement()) {
     String primaryKey = tableConfig.getPrimaryKey();
     processedInsert = processInsert(sc, schema, sqlType, origSQL, tableName, primaryKey);
   }
   return processedInsert;
 }
Example 5
  public static void routeForTableMeta(
      RouteResultset rrs, SchemaConfig schema, String tableName, String sql) {
    String dataNode = null;
    if (isNoSharding(schema, tableName)) { // a non-sharded table takes the dataNode directly from the schema config
      dataNode = schema.getDataNode();
    } else {
      dataNode = getMetaReadDataNode(schema, tableName);
    }

    RouteResultsetNode[] nodes = new RouteResultsetNode[1];
    nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), sql);
    if (rrs.getCanRunInReadDB() != null) {
      nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB());
    }
    rrs.setNodes(nodes);
  }
Example 6
  public static boolean processERChildTable(
      final SchemaConfig schema, final String origSQL, final ServerConnection sc)
      throws SQLNonTransientException {
    String tableName = StringUtil.getTableName(origSQL).toUpperCase();
    final TableConfig tc = schema.getTables().get(tableName);

    if (null != tc && tc.isChildTable()) {
      final RouteResultset rrs = new RouteResultset(origSQL, ServerParse.INSERT);
      String joinKey = tc.getJoinKey();
      MySqlInsertStatement insertStmt =
          (MySqlInsertStatement) (new MySqlStatementParser(origSQL)).parseInsert();
      int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey);

      if (joinKeyIndex == -1) {
        String inf = "joinKey not provided :" + tc.getJoinKey() + "," + insertStmt;
        LOGGER.warn(inf);
        throw new SQLNonTransientException(inf);
      }
      if (isMultiInsert(insertStmt)) {
        String msg = "ChildTable multi insert not provided";
        LOGGER.warn(msg);
        throw new SQLNonTransientException(msg);
      }

      String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString();

      String sql = insertStmt.toString();

      // try to route by the ER parent's partition key
      RouteResultset theRrs = RouterUtil.routeByERParentKey(sql, rrs, tc, joinKeyVal);

      if (theRrs != null) {
        rrs.setFinishedRoute(true);
        sc.getSession2().execute(rrs, ServerParse.INSERT);
        return true;
      }

      // otherwise route by querying for the root parent's data node
      final String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal;
      if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("find root parent's node sql " + findRootTBSql);
      }

      ListenableFuture<String> listenableFuture =
          MycatServer.getInstance()
              .getListeningExecutorService()
              .submit(
                  new Callable<String>() {
                    @Override
                    public String call() throws Exception {
                      FetchStoreNodeOfChildTableHandler fetchHandler =
                          new FetchStoreNodeOfChildTableHandler();
                      return fetchHandler.execute(
                          schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes());
                    }
                  });

      Futures.addCallback(
          listenableFuture,
          new FutureCallback<String>() {
            @Override
            public void onSuccess(String result) {
              if (Strings.isNullOrEmpty(result)) {
                StringBuilder s = new StringBuilder();
                LOGGER.warn(
                    s.append(sc.getSession2()).append(origSQL).toString()
                        + " err:"
                        + "can't find (root) parent sharding node for sql:"
                        + origSQL);
                sc.writeErrMessage(
                    ErrorCode.ER_PARSE_ERROR,
                    "can't find (root) parent sharding node for sql:" + origSQL);
                return;
              }

              if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(
                    "found partion node for child table to insert " + result + " sql :" + origSQL);
              }

              RouteResultset executeRrs = RouterUtil.routeToSingleNode(rrs, result, origSQL);
              sc.getSession2().execute(executeRrs, ServerParse.INSERT);
            }

            @Override
            public void onFailure(Throwable t) {
              StringBuilder s = new StringBuilder();
              LOGGER.warn(
                  s.append(sc.getSession2()).append(origSQL).toString() + " err:" + t.getMessage());
              sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, t.getMessage() + " " + s.toString());
            }
          },
          MycatServer.getInstance().getListeningExecutorService());
      return true;
    }
    return false;
  }
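
The method above avoids blocking the connection thread while it looks up the root parent's data node: it submits the lookup to a ListeningExecutorService and reacts in a FutureCallback. Below is a minimal, self-contained sketch of that Guava pattern; the class name, executor setup, and the hard-coded "dn1" result (standing in for FetchStoreNodeOfChildTableHandler.execute) are hypothetical.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class AsyncNodeLookupSketch {
  public static void main(String[] args) {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    // Submit the (simulated) "find the parent's data node" query without blocking the caller.
    ListenableFuture<String> future =
        executor.submit(
            new Callable<String>() {
              @Override
              public String call() {
                return "dn1"; // hypothetical result of querying the candidate data nodes
              }
            });

    // React when the lookup finishes: route the insert on success, report an error on failure.
    Futures.addCallback(
        future,
        new FutureCallback<String>() {
          @Override
          public void onSuccess(String dataNode) {
            System.out.println("route child-table insert to " + dataNode);
          }

          @Override
          public void onFailure(Throwable t) {
            System.err.println("parent node lookup failed: " + t.getMessage());
          }
        },
        executor);

    executor.shutdown();
  }
}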
Example 7
  /**
   * Find routes for sharded tables.
   *
   * @param schema the schema configuration
   * @param tablesAndConditions the column conditions collected for each table
   * @param tablesRouteMap the resulting map from table name to its target data nodes
   * @throws SQLNonTransientException if a table definition or a valid data node cannot be found
   */
  public static void findRouteWithcConditionsForTables(
      SchemaConfig schema,
      RouteResultset rrs,
      Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions,
      Map<String, Set<String>> tablesRouteMap,
      String sql,
      LayerCachePool cachePool,
      boolean isSelect)
      throws SQLNonTransientException {
    // find routes for the sharded tables
    for (Map.Entry<String, Map<String, Set<ColumnRoutePair>>> entry :
        tablesAndConditions.entrySet()) {
      String tableName = entry.getKey().toUpperCase();
      TableConfig tableConfig = schema.getTables().get(tableName);
      if (tableConfig == null) {
        String msg =
            "can't find table define in schema " + tableName + " schema:" + schema.getName();
        LOGGER.warn(msg);
        throw new SQLNonTransientException(msg);
      }
      // skip global tables and tables on a single node (global tables are handled later)
      if (tableConfig.isGlobalTable()
          || schema.getTables().get(tableName).getDataNodes().size() == 1) {
        continue;
      } else { // non-global tables: sharded tables, child tables, and others
        Map<String, Set<ColumnRoutePair>> columnsMap = entry.getValue();
        String joinKey = tableConfig.getJoinKey();
        String partionCol = tableConfig.getPartitionColumn();
        String primaryKey = tableConfig.getPrimaryKey();
        boolean isFoundPartitionValue =
            partionCol != null && entry.getValue().get(partionCol) != null;
        boolean isLoadData = false;
        if (LOGGER.isDebugEnabled()) {
          if (sql.startsWith(LoadData.loadDataHint)
              || rrs.isLoadData()) { // load data computes many routes at once; logging here would badly hurt load data performance
            isLoadData = true;
          }
        }
        if (entry.getValue().get(primaryKey) != null
            && entry.getValue().size() == 1
            && !isLoadData) { // 主键查找
          // try by primary key if found in cache
          Set<ColumnRoutePair> primaryKeyPairs = entry.getValue().get(primaryKey);
          if (primaryKeyPairs != null) {
            if (LOGGER.isDebugEnabled()) {
              LOGGER.debug("try to find cache by primary key ");
            }
            String tableKey = schema.getName() + '_' + tableName;
            boolean allFound = true;
            for (ColumnRoutePair pair : primaryKeyPairs) { // may be several key values, e.g. id in (1,2,3)
              String cacheKey = pair.colValue;
              String dataNode = (String) cachePool.get(tableKey, cacheKey);
              if (dataNode == null) {
                allFound = false;
                continue;
              } else {
                if (tablesRouteMap.get(tableName) == null) {
                  tablesRouteMap.put(tableName, new HashSet<String>());
                }
                tablesRouteMap.get(tableName).add(dataNode);
                continue;
              }
            }
            if (!allFound) {
              // need cache primary key ->datanode relation
              if (isSelect && tableConfig.getPrimaryKey() != null) {
                rrs.setPrimaryKey(tableKey + '.' + tableConfig.getPrimaryKey());
              }
            } else { // every primary key was found in the cache, continue with the next table
              continue;
            }
          }
        }
        if (isFoundPartitionValue) { // sharded table
          Set<ColumnRoutePair> partitionValue = columnsMap.get(partionCol);
          if (partitionValue == null || partitionValue.size() == 0) {
            if (tablesRouteMap.get(tableName) == null) {
              tablesRouteMap.put(tableName, new HashSet<String>());
            }
            tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
          } else {
            for (ColumnRoutePair pair : partitionValue) {
              if (pair.colValue != null) {
                Integer nodeIndex =
                    tableConfig.getRule().getRuleAlgorithm().calculate(pair.colValue);
                if (nodeIndex == null) {
                  String msg =
                      "can't find any valid datanode :"
                          + tableConfig.getName()
                          + " -> "
                          + tableConfig.getPartitionColumn()
                          + " -> "
                          + pair.colValue;
                  LOGGER.warn(msg);
                  throw new SQLNonTransientException(msg);
                }
                String node = tableConfig.getDataNodes().get(nodeIndex);
                if (node != null) {
                  if (tablesRouteMap.get(tableName) == null) {
                    tablesRouteMap.put(tableName, new HashSet<String>());
                  }
                  tablesRouteMap.get(tableName).add(node);
                }
              }
              if (pair.rangeValue != null) {
                Integer[] nodeIndexs =
                    tableConfig
                        .getRule()
                        .getRuleAlgorithm()
                        .calculateRange(
                            pair.rangeValue.beginValue.toString(),
                            pair.rangeValue.endValue.toString());
                for (Integer idx : nodeIndexs) {
                  String node = tableConfig.getDataNodes().get(idx);
                  if (node != null) {
                    if (tablesRouteMap.get(tableName) == null) {
                      tablesRouteMap.put(tableName, new HashSet<String>());
                    }
                    tablesRouteMap.get(tableName).add(node);
                  }
                }
              }
            }
          }
        } else if (joinKey != null
            && columnsMap.get(joinKey) != null
            && columnsMap.get(joinKey).size() != 0) {
          // child table: for a parent-child join in a SELECT, the root table must be located
          // first; child tables are removed so that only the root table is left
          Set<ColumnRoutePair> joinKeyValue = columnsMap.get(joinKey);

          ColumnRoutePair joinCol = null;

          Set<String> dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue);

          if (dataNodeSet.isEmpty()) {
            throw new SQLNonTransientException("parent key can't find any valid datanode ");
          }
          if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                "found partion nodes (using parent partion rule directly) for child table to update  "
                    + Arrays.toString(dataNodeSet.toArray())
                    + " sql :"
                    + sql);
          }
          if (dataNodeSet.size() > 1) {
            routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql);
            rrs.setFinishedRoute(true);
            return;
          } else {
            rrs.setCacheAble(true);
            routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql);
            return;
          }

        } else {
          // no sharding column value found; route to all of this table's data nodes
          if (tablesRouteMap.get(tableName) == null) {
            tablesRouteMap.put(tableName, new HashSet<String>());
          }
          tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
        }
      }
    }
  }
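
The sharded-table branch above depends on tableConfig.getRule().getRuleAlgorithm().calculate(value) returning an index into the table's dataNodes list, or null when the value cannot be routed. The sketch below illustrates that contract with a hypothetical modulo rule; it is not MyCAT's RuleAlgorithm interface, just a stand-alone illustration with made-up node names.

import java.util.Arrays;
import java.util.List;

public class ModuloRuleSketch {
  // Map a sharding-column value to an index into the dataNodes list, or null if it cannot be routed.
  static Integer calculate(String columnValue, int nodeCount) {
    if (columnValue == null || columnValue.isEmpty()) {
      return null;
    }
    long id = Long.parseLong(columnValue);
    return (int) (id % nodeCount);
  }

  public static void main(String[] args) {
    List<String> dataNodes = Arrays.asList("dn1", "dn2", "dn3"); // hypothetical data nodes
    Integer nodeIndex = calculate("1001", dataNodes.size());
    if (nodeIndex == null) {
      System.out.println("no valid data node, route to all nodes of the table");
    } else {
      System.out.println("route to " + dataNodes.get(nodeIndex)); // prints: route to dn3
    }
  }
}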
Example 8
  /**
   * Route a statement that references a single table.
   *
   * @param schema the schema configuration
   * @param ctx the Druid sharding parse info
   * @param routeUnit the route calculate unit holding the parsed column conditions
   * @param tableName the table name
   * @param rrs the route result set to fill
   * @param isSelect whether the statement is a SELECT
   * @param cachePool the primary-key-to-dataNode cache
   * @return the completed route result set
   * @throws SQLNonTransientException if the table or a valid data node cannot be found
   */
  public static RouteResultset tryRouteForOneTable(
      SchemaConfig schema,
      DruidShardingParseInfo ctx,
      RouteCalculateUnit routeUnit,
      String tableName,
      RouteResultset rrs,
      boolean isSelect,
      LayerCachePool cachePool)
      throws SQLNonTransientException {
    if (isNoSharding(schema, tableName)) {
      return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql());
    }

    TableConfig tc = schema.getTables().get(tableName);
    if (tc == null) {
      String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName();
      LOGGER.warn(msg);
      throw new SQLNonTransientException(msg);
    }
    if (tc.isGlobalTable()) { // global table
      if (isSelect) {
        // global select ,not cache route result
        rrs.setCacheAble(false);
        return routeToSingleNode(rrs, tc.getRandomDataNode(), ctx.getSql());
      } else { // insert records into a global table
        return routeToMultiNode(false, rrs, tc.getDataNodes(), ctx.getSql(), true);
      }
    } else { // single-node table or sharded table
      if (!checkRuleRequired(schema, ctx, routeUnit, tc)) {
        throw new IllegalArgumentException(
            "route rule for table " + tc.getName() + " is required: " + ctx.getSql());
      }
      if (tc.getPartitionColumn() == null && !tc.isSecondLevel()) { // single-node table that is not a child table
        //				return RouterUtil.routeToSingleNode(rrs, tc.getDataNodes().get(0),ctx.getSql());
        return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql());
      } else {
        // route map for each table
        Map<String, Set<String>> tablesRouteMap = new HashMap<String, Set<String>>();
        if (routeUnit.getTablesAndConditions() != null
            && routeUnit.getTablesAndConditions().size() > 0) {
          RouterUtil.findRouteWithcConditionsForTables(
              schema,
              rrs,
              routeUnit.getTablesAndConditions(),
              tablesRouteMap,
              ctx.getSql(),
              cachePool,
              isSelect);
          if (rrs.isFinishedRoute()) {
            return rrs;
          }
        }

        if (tablesRouteMap.get(tableName) == null) {
          return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql());
        } else {
          //					boolean isCache = rrs.isCacheAble();
          //					if(tablesRouteMap.get(tableName).size() > 1) {
          //
          //					}
          return routeToMultiNode(
              rrs.isCacheAble(), rrs, tablesRouteMap.get(tableName), ctx.getSql());
        }
      }
    }
  }
Example 9
  /**
   * Route a statement that references multiple tables.
   *
   * @param schema the schema configuration
   * @param ctx the Druid sharding parse info
   * @param routeUnit the route calculate unit holding the parsed column conditions
   * @param rrs the route result set to fill
   * @param isSelect whether the statement is a SELECT
   * @param cachePool the primary-key-to-dataNode cache
   * @return the completed route result set
   * @throws SQLNonTransientException if a table or a valid data node cannot be found
   */
  public static RouteResultset tryRouteForTables(
      SchemaConfig schema,
      DruidShardingParseInfo ctx,
      RouteCalculateUnit routeUnit,
      RouteResultset rrs,
      boolean isSelect,
      LayerCachePool cachePool)
      throws SQLNonTransientException {
    List<String> tables = ctx.getTables();
    if (schema.isNoSharding() || (tables.size() >= 1 && isNoSharding(schema, tables.get(0)))) {
      return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql());
    }

    // only one table is referenced
    if (tables.size() == 1) {
      return RouterUtil.tryRouteForOneTable(
          schema, ctx, routeUnit, tables.get(0), rrs, isSelect, cachePool);
    }

    Set<String> retNodesSet = new HashSet<String>();
    // route map for each table
    Map<String, Set<String>> tablesRouteMap = new HashMap<String, Set<String>>();

    // sharding conditions were parsed for at least one table
    Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions =
        routeUnit.getTablesAndConditions();
    if (tablesAndConditions != null && tablesAndConditions.size() > 0) {
      // find routes for the sharded tables
      RouterUtil.findRouteWithcConditionsForTables(
          schema, rrs, tablesAndConditions, tablesRouteMap, ctx.getSql(), cachePool, isSelect);
      if (rrs.isFinishedRoute()) {
        return rrs;
      }
    }

    // find routes for global tables and single-node tables
    for (String tableName : tables) {
      TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase());
      if (tableConfig == null) {
        String msg =
            "can't find table define in schema " + tableName + " schema:" + schema.getName();
        LOGGER.warn(msg);
        throw new SQLNonTransientException(msg);
      }
      if (tableConfig.isGlobalTable()) { // global table
        if (tablesRouteMap.get(tableName) == null) {
          tablesRouteMap.put(tableName, new HashSet<String>());
        }
        tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
      } else if (tablesRouteMap.get(tableName) == null) { // the remaining tables are single-node tables
        tablesRouteMap.put(tableName, new HashSet<String>());
        tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
      }
    }

    boolean isFirstAdd = true;
    for (Map.Entry<String, Set<String>> entry : tablesRouteMap.entrySet()) {
      if (entry.getValue() == null || entry.getValue().size() == 0) {
        throw new SQLNonTransientException("parent key can't find any valid datanode ");
      } else {
        if (isFirstAdd) {
          retNodesSet.addAll(entry.getValue());
          isFirstAdd = false;
        } else {
          retNodesSet.retainAll(entry.getValue());
          if (retNodesSet.size() == 0) { // the tables' routes have no intersection
            String errMsg =
                "invalid route in sql, multi tables found but datanode has no intersection "
                    + " sql:"
                    + ctx.getSql();
            LOGGER.warn(errMsg);
            throw new SQLNonTransientException(errMsg);
          }
        }
      }
    }

    if (retNodesSet != null && retNodesSet.size() > 0) {
      if (retNodesSet.size() > 1 && isAllGlobalTable(ctx, schema)) {
        // multiple routes; do not cache the route result
        if (isSelect) {
          rrs.setCacheAble(false);
          routeToSingleNode(rrs, retNodesSet.iterator().next(), ctx.getSql());
        } else { // e.g. delete records from a global table
          routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql(), true);
        }

      } else {
        routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql());
      }
    }
    return rrs;
  }
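
The loop over tablesRouteMap above narrows the candidate nodes with retainAll: the first table seeds retNodesSet, every later table intersects it, and an empty intersection means the multi-table statement cannot be routed. A minimal stand-alone sketch of that step, with made-up table and node names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RouteIntersectionSketch {
  public static void main(String[] args) {
    Set<String> ordersNodes = new HashSet<>(Arrays.asList("dn1", "dn2"));     // hypothetical
    Set<String> orderItemsNodes = new HashSet<>(Arrays.asList("dn2", "dn3")); // hypothetical

    Set<String> retNodesSet = new HashSet<>(ordersNodes); // first table seeds the result
    retNodesSet.retainAll(orderItemsNodes);               // later tables narrow it down

    if (retNodesSet.isEmpty()) {
      System.out.println("no common data node: the multi-table query cannot be routed");
    } else {
      System.out.println("route to " + retNodesSet); // prints: route to [dn2]
    }
  }
}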
Example 10
  private void loadSchemas(Element root) {
    NodeList list = root.getElementsByTagName("schema");
    for (int i = 0, n = list.getLength(); i < n; i++) {
      Element schemaElement = (Element) list.item(i);
      String name = schemaElement.getAttribute("name");
      String dataNode = schemaElement.getAttribute("dataNode");
      String checkSQLSchemaStr = schemaElement.getAttribute("checkSQLschema");
      String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit");
      int sqlMaxLimit = -1;
      if (sqlMaxLimitStr != null && !sqlMaxLimitStr.isEmpty()) {
        sqlMaxLimit = Integer.valueOf(sqlMaxLimitStr);
      }
      // check whether the referenced dataNode exists
      String defaultDbType = null;
      if (dataNode != null && !dataNode.isEmpty()) {
        List<String> dataNodeLst = new ArrayList<String>(1);
        dataNodeLst.add(dataNode);
        checkDataNodeExists(dataNodeLst);
        String dataHost = dataNodes.get(dataNode).getDataHost();
        defaultDbType = dataHosts.get(dataHost).getDbType();
      } else {
        dataNode = null;
      }
      Map<String, TableConfig> tables = loadTables(schemaElement);
      if (schemas.containsKey(name)) {
        throw new ConfigException("schema " + name + " duplicated!");
      }

      // a schema that defines tables does not need the dataNode attribute; a schema without tables must set it
      if (dataNode == null && tables.size() == 0) {
        throw new ConfigException(
            "schema " + name + " does not define any tables, so the dataNode property must be set!");
      }

      SchemaConfig schemaConfig =
          new SchemaConfig(
              name, dataNode, tables, sqlMaxLimit, "true".equalsIgnoreCase(checkSQLSchemaStr));
      if (defaultDbType != null) {
        schemaConfig.setDefaultDataNodeDbType(defaultDbType);
        if (!"mysql".equalsIgnoreCase(defaultDbType)) {
          schemaConfig.setNeedSupportMultiDBType(true);
        }
      }

      // check whether any table uses a non-MySQL database type, so the parser knows whether
      // to enable multi-database pagination syntax parsing

      for (String tableName : tables.keySet()) {
        TableConfig tableConfig = tables.get(tableName);
        if (isHasMultiDbType(tableConfig)) {
          schemaConfig.setNeedSupportMultiDBType(true);
          break;
        }
      }
      Map<String, String> dataNodeDbTypeMap = new HashMap<>();
      for (String dataNodeName : dataNodes.keySet()) {
        DataNodeConfig dataNodeConfig = dataNodes.get(dataNodeName);
        String dataHost = dataNodeConfig.getDataHost();
        DataHostConfig dataHostConfig = dataHosts.get(dataHost);
        if (dataHostConfig != null) {
          String dbType = dataHostConfig.getDbType();
          dataNodeDbTypeMap.put(dataNodeName, dbType);
        }
      }
      schemaConfig.setDataNodeDbTypeMap(dataNodeDbTypeMap);
      schemas.put(name, schemaConfig);
    }
  }
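
loadSchemas reads each <schema> element's name, dataNode, checkSQLschema and sqlMaxLimit attributes through the standard DOM API. The sketch below shows how those attributes come back from Element.getAttribute (a missing attribute yields the empty string); the XML fragment and its values are hypothetical, not taken from a real schema.xml.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class SchemaAttributeSketch {
  public static void main(String[] args) throws Exception {
    String xml =
        "<root>"
            + "<schema name=\"demo_db\" dataNode=\"dn1\" checkSQLschema=\"true\" sqlMaxLimit=\"100\"/>"
            + "</root>";
    Document doc =
        DocumentBuilderFactory.newInstance()
            .newDocumentBuilder()
            .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
    Element schemaElement = (Element) doc.getElementsByTagName("schema").item(0);

    // the same attributes that loadSchemas() reads; a missing attribute yields ""
    System.out.println("name           = " + schemaElement.getAttribute("name"));
    System.out.println("dataNode       = " + schemaElement.getAttribute("dataNode"));
    System.out.println("checkSQLschema = " + schemaElement.getAttribute("checkSQLschema"));
    System.out.println("sqlMaxLimit    = " + schemaElement.getAttribute("sqlMaxLimit"));
  }
}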