private class DependencyTracker {
    private final Map<Integer, ArrayDeque<VoltTable>> m_depsById =
            new ConcurrentHashMap<Integer, ArrayDeque<VoltTable>>();

    private final Logger hostLog = Logger.getLogger("HOST", VoltLoggerFactory.instance());

    /**
     * Add a single dependency. Exists only for test cases.
     *
     * @param depId
     * @param vt
     */
    void addDependency(final int depId, final VoltTable vt) {
        ArrayDeque<VoltTable> deque = m_depsById.get(depId);
        if (deque == null) {
            deque = new ArrayDeque<VoltTable>();
            m_depsById.put(depId, deque);
        }
        deque.add(vt);
    }

    /**
     * Store dependency tables for later retrieval by the EE.
     *
     * @param dependencies
     */
    void trackNewWorkUnit(final Map<Integer, List<VoltTable>> dependencies) {
        for (final Entry<Integer, List<VoltTable>> e : dependencies.entrySet()) {
            // could do this optionally - debug only.
            if (d) verifyDependencySanity(e.getKey(), e.getValue());

            // Keep a new collection of references to the work unit's tables so later
            // changes to the WorkUnit's list are not visible here, but do not copy
            // the table data itself.
            ArrayDeque<VoltTable> deque = m_depsById.get(e.getKey());
            if (deque == null) {
                // No deque is registered for this dependency id yet; create one.
                deque = new ArrayDeque<VoltTable>();
                m_depsById.put(e.getKey(), deque);
            } else {
                // Reuse the existing deque, discarding stale tables from a prior transaction.
                deque.clear();
            }
            deque.addAll(e.getValue());
        }
        if (d) LOG.debug("Current InputDependencies:\n" + StringUtil.formatMaps(m_depsById));
    }

    public VoltTable nextDependency(final int dependencyId) {
        // This formulation retains an ArrayDeque in the tracker that is
        // overwritten by the next transaction using this dependency id. If
        // the EE requests all dependencies (as is expected), the deque
        // will not retain any references to VoltTables (which is the goal).
        final ArrayDeque<VoltTable> vtstack = m_depsById.get(dependencyId);
        if (vtstack != null && vtstack.size() > 0) {
            // The javadoc says this is amortized constant time.
            return vtstack.pop();
        } else if (vtstack == null) {
            assert (false) : "Received a dependency request without an associated tracked dependency. [depId="
                    + dependencyId + "]";
            return null;
        } else {
            return null;
        }
    }

    /**
     * Log and exit if a dependency list fails an invariant.
     *
     * @param dependencyId
     * @param dependencies
     */
    void verifyDependencySanity(final Integer dependencyId, final List<VoltTable> dependencies) {
        if (dependencies == null) {
            hostLog.l7dlog(Level.FATAL,
                           LogKeys.host_ExecutionSite_DependencyNotFound.name(),
                           new Object[] { dependencyId },
                           null);
            VoltDB.crashVoltDB();
        }
        for (final Object dependency : dependencies) {
            if (dependency == null) {
                hostLog.l7dlog(Level.FATAL,
                               LogKeys.host_ExecutionSite_DependencyContainedNull.name(),
                               new Object[] { dependencyId },
                               null);
                VoltDB.crashVoltDB();
            }
            if (!(dependency instanceof VoltTable)) {
                hostLog.l7dlog(Level.FATAL,
                               LogKeys.host_ExecutionSite_DependencyNotVoltTable.name(),
                               new Object[] { dependencyId },
                               null);
                VoltDB.crashVoltDB();
            }
            if (t) LOG.trace(String.format("Storing Dependency %d\n:%s", dependencyId, dependency));
        } // FOR
    }
}
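/*
 * Usage sketch (not part of the original source): the intended call sequence for the
 * tracker. This assumes it runs inside the enclosing execution-site class so the private
 * inner DependencyTracker is visible, and that the usual java.util and org.voltdb
 * (VoltTable, VoltType) imports are in scope. The dependency id and column layout below
 * are made up purely for illustration.
 */
void dependencyTrackerUsageSketch() {
    final DependencyTracker tracker = new DependencyTracker();
    final int depId = 42; // arbitrary dependency id for this example

    // Build a single-column table standing in for a fragment result.
    VoltTable result = new VoltTable(new VoltTable.ColumnInfo("value", VoltType.BIGINT));
    result.addRow(100L);

    // Register the table under its dependency id, as trackNewWorkUnit expects.
    Map<Integer, List<VoltTable>> deps = new HashMap<Integer, List<VoltTable>>();
    deps.put(depId, Collections.singletonList(result));
    tracker.trackNewWorkUnit(deps);

    // The EE then drains the dependency one table at a time until the deque is empty.
    VoltTable next;
    while ((next = tracker.nextDependency(depId)) != null) {
        System.out.println("drained a table with " + next.getRowCount() + " row(s)");
    }
}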
/**
 * Class that maps object values to partitions. It's rather simple really. It'll get more
 * complicated if you give it time.
 */
public abstract class TheHashinator {

    static int partitionCount;

    private static final Logger hostLogger = Logger.getLogger("HOST", VoltLoggerFactory.instance());

    /**
     * Initialize TheHashinator.
     *
     * @param catalog A pointer to the catalog data structure.
     */
    public static void initialize(Catalog catalog) {
        Cluster cluster = catalog.getClusters().get("cluster");
        partitionCount = cluster.getNum_partitions();
    }

    /**
     * Given a long value, pick a partition to store the data.
     *
     * @param value The value to hash.
     * @param partitionCount The number of partitions to choose from.
     * @return A value between 0 and partitionCount-1, hopefully pretty evenly distributed.
     */
    static int hashinate(long value, int partitionCount) {
        // Fold the high and low 32 bits together, then map into the partition range.
        int index = (int) (value ^ (value >>> 32));
        return java.lang.Math.abs(index % partitionCount);
    }

    /**
     * Given an Object value, pick a partition to store the data. Currently only String
     * objects can be hashed.
     *
     * @param value The value to hash.
     * @param partitionCount The number of partitions to choose from.
     * @return A value between 0 and partitionCount-1, hopefully pretty evenly distributed.
     */
    static int hashinate(Object value, int partitionCount) {
        if (value instanceof String) {
            String string = (String) value;
            try {
                byte bytes[] = string.getBytes("UTF-8");
                // Compute a String.hashCode()-style hash over the UTF-8 bytes.
                int hashCode = 0;
                int offset = 0;
                for (int ii = 0; ii < bytes.length; ii++) {
                    hashCode = 31 * hashCode + bytes[offset++];
                }
                return java.lang.Math.abs(hashCode % partitionCount);
            } catch (UnsupportedEncodingException e) {
                hostLogger.l7dlog(Level.FATAL,
                                  LogKeys.host_TheHashinator_ExceptionHashingString.name(),
                                  new Object[] { string },
                                  e);
                HStore.crashDB();
            }
        }
        hostLogger.l7dlog(Level.FATAL,
                          LogKeys.host_TheHashinator_AttemptedToHashinateNonLongOrString.name(),
                          new Object[] { value.getClass().getName() },
                          null);
        HStore.crashDB();
        return -1;
    }

    /**
     * Given an object, map it to a partition.
     *
     * @param obj The object to be mapped to a partition.
     * @return The id of the partition desired.
     */
    public static int hashToPartition(Object obj) {
        return (hashToPartition(obj, TheHashinator.partitionCount));
    }

    /**
     * Given an object and a number of partitions, map the object to a partition.
     *
     * @param obj The object to be mapped to a partition.
     * @param partitionCount The number of partitions TheHashinator will use.
     * @return The id of the partition desired.
     */
    public static int hashToPartition(Object obj, int partitionCount) {
        int index = 0;
        if (obj instanceof Long) {
            long value = ((Long) obj).longValue();
            index = hashinate(value, partitionCount);
        } else if (obj instanceof String) {
            index = hashinate(obj, partitionCount);
        } else if (obj instanceof Integer) {
            long value = (long) ((Integer) obj).intValue();
            index = hashinate(value, partitionCount);
        } else if (obj instanceof Short) {
            long value = (long) ((Short) obj).shortValue();
            index = hashinate(value, partitionCount);
        } else if (obj instanceof Byte) {
            long value = (long) ((Byte) obj).byteValue();
            index = hashinate(value, partitionCount);
        }
        // Unsupported types fall through and map to partition 0.
        return index;
    }
}
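/*
 * Usage sketch (not part of the original source): demonstrates how hashToPartition
 * spreads integral and String keys across partitions. The partition count of 4 and the
 * sample keys are arbitrary; in a running system TheHashinator.initialize(catalog) sets
 * the partition count and callers use the single-argument hashToPartition(obj) instead.
 */
class TheHashinatorUsageSketch {
    public static void main(String[] args) {
        final int partitionCount = 4; // assumed value, normally read from the catalog

        // Longs (and other integral types) are widened to long before hashing.
        for (long key = 0; key < 8; key++) {
            System.out.printf("key %d -> partition %d%n",
                    key, TheHashinator.hashToPartition(key, partitionCount));
        }

        // Strings are hashed over their UTF-8 bytes.
        for (String key : new String[] { "alpha", "beta", "gamma" }) {
            System.out.printf("key %s -> partition %d%n",
                    key, TheHashinator.hashToPartition(key, partitionCount));
        }
    }
}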