/**
 * Returns a new metadata snapshot in which {@code table} is re-cached with the given
 * resolved timestamp. Uses {@code putDuplicate} so the entry is refreshed without
 * changing the cache's size accounting for a brand-new table.
 *
 * @param table the table whose cached resolved timestamp should be replaced
 * @param resolvedTimestamp the new resolved timestamp to record for the table
 * @return a new {@code PMetaDataImpl} wrapping the updated cache
 */
@Override
public PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp)
        throws SQLException {
    PMetaDataCache updatedCache = metaData.clone();
    updatedCache.putDuplicate(table.getKey(), table, resolvedTimestamp);
    return new PMetaDataImpl(updatedCache);
}
/**
 * Used when the cache is growing past its max size to clone in a single pass. Removes least
 * recently used tables to get size of cache below its max size by the overage amount.
 */
public PMetaDataCache cloneMinusOverage(long overage) {
    assert (overage > 0);
    // Estimate how many entries must go: overage divided by the average per-entry size,
    // floored at MIN_REMOVAL_SIZE, +1 to round up. Only used to presize the queue.
    int nToRemove = Math.max(
        MIN_REMOVAL_SIZE,
        (int) Math.ceil((currentByteSize - maxByteSize) / ((double) currentByteSize / size())) + 1);
    // NOTE(review): BUILDER's comparator is declared elsewhere — presumably it orders refs
    // by last-access time so the LRU candidates sink to one end; confirm against its definition.
    MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
    PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper);

    long toRemoveBytes = 0;
    // Add to new cache, but track references to remove when done
    // to bring cache at least overage amount below it's max size.
    for (PTableRef tableRef : this.tables.values()) {
        // Every entry is copied into the clone up front; evictions happen afterwards.
        newCache.put(tableRef.getTable().getKey(), new PTableRef(tableRef));
        toRemove.add(tableRef);
        toRemoveBytes += tableRef.getEstSize();
        // Shrink the removal set while dropping its "last" element still leaves enough
        // bytes to cover the overage — i.e. keep only the minimal set of candidates.
        while (toRemoveBytes - toRemove.peekLast().getEstSize() >= overage) {
            PTableRef removedRef = toRemove.removeLast();
            toRemoveBytes -= removedRef.getEstSize();
        }
    }
    // Evict the surviving candidates from the freshly built clone.
    for (PTableRef toRemoveRef : toRemove) {
        newCache.remove(toRemoveRef.getTable().getKey());
    }
    return newCache;
}
@Override public PMetaData removeColumn( PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName)); if (tableRef == null) { return this; } PTable table = tableRef.getTable(); PMetaDataCache tables = metaData.clone(); for (PColumn columnToRemove : columnsToRemove) { PColumn column; String familyName = columnToRemove.getFamilyName().getString(); if (familyName == null) { column = table.getPKColumn(columnToRemove.getName().getString()); } else { column = table.getColumnFamily(familyName).getColumn(columnToRemove.getName().getString()); } int positionOffset = 0; int position = column.getPosition(); List<PColumn> oldColumns = table.getColumns(); if (table.getBucketNum() != null) { position--; positionOffset = 1; oldColumns = oldColumns.subList(positionOffset, oldColumns.size()); } List<PColumn> columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1); columns.addAll(oldColumns.subList(0, position)); // Update position of columns that follow removed column for (int i = position + 1; i < oldColumns.size(); i++) { PColumn oldColumn = oldColumns.get(i); PColumn newColumn = new PColumnImpl( oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i - 1 + positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced(), null, oldColumn.isRowTimestamp()); columns.add(newColumn); } table = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns); } tables.put(table.getKey(), table, resolvedTime); return new PMetaDataImpl(tables); }
@Override public PMetaData removeTable( PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException { PMetaDataCache tables = null; PTableRef parentTableRef = null; PTableKey key = new PTableKey(tenantId, tableName); if (metaData.get(key) == null) { if (parentTableName != null) { parentTableRef = metaData.get(new PTableKey(tenantId, parentTableName)); } if (parentTableRef == null) { return this; } } else { tables = metaData.clone(); PTable table = tables.remove(key); for (PTable index : table.getIndexes()) { tables.remove(index.getKey()); } if (table.getParentName() != null) { parentTableRef = tables.get(new PTableKey(tenantId, table.getParentName().getString())); } } // also remove its reference from parent table if (parentTableRef != null) { List<PTable> oldIndexes = parentTableRef.getTable().getIndexes(); if (oldIndexes != null && !oldIndexes.isEmpty()) { List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size()); newIndexes.addAll(oldIndexes); for (int i = 0; i < newIndexes.size(); i++) { PTable index = newIndexes.get(i); if (index.getName().getString().equals(tableName)) { newIndexes.remove(i); PTable parentTable = PTableImpl.makePTable( parentTableRef.getTable(), tableTimeStamp == HConstants.LATEST_TIMESTAMP ? parentTableRef.getTable().getTimeStamp() : tableTimeStamp, newIndexes); if (tables == null) { tables = metaData.clone(); } tables.put(parentTable.getKey(), parentTable, parentTableRef.getResolvedTimeStamp()); break; } } } } return tables == null ? this : new PMetaDataImpl(tables); }
/**
 * Returns a new metadata snapshot without the tables the pruner selects.
 * Returns {@code this} unchanged when nothing matches, avoiding a clone.
 *
 * @param pruner decides which tables to drop
 * @return a pruned {@code PMetaDataImpl}, or {@code this} if no table was pruned
 */
@Override
public PMetaData pruneTables(Pruner pruner) {
    // First pass: collect keys so we only clone the cache if work is needed.
    List<PTableKey> doomed = Lists.newArrayListWithExpectedSize(this.size());
    for (PTable candidate : this) {
        if (pruner.prune(candidate)) {
            doomed.add(candidate.getKey());
        }
    }
    if (doomed.isEmpty()) {
        return this;
    }
    PMetaDataCache prunedCache = metaData.clone();
    for (PTableKey doomedKey : doomed) {
        prunedCache.remove(doomedKey);
    }
    return new PMetaDataImpl(prunedCache);
}
/**
 * Looks up the cached reference for {@code key}.
 *
 * @param key tenant-qualified table key
 * @return the cached {@code PTableRef}
 * @throws TableNotFoundException if the key is not present in the cache
 */
@Override
public PTableRef getTableRef(PTableKey key) throws TableNotFoundException {
    PTableRef cachedRef = metaData.get(key);
    if (cachedRef != null) {
        return cachedRef;
    }
    throw new TableNotFoundException(key.getName());
}
/**
 * Returns a new metadata snapshot without the functions the pruner selects.
 * Returns {@code this} unchanged when nothing matches, avoiding a clone.
 *
 * @param pruner decides which functions to drop
 * @return a pruned {@code PMetaDataImpl}, or {@code this} if no function was pruned
 */
@Override
public PMetaData pruneFunctions(Pruner pruner) {
    // Collect keys first so the cache is only cloned when there is something to remove.
    List<PTableKey> doomed = Lists.newArrayListWithExpectedSize(this.size());
    for (PFunction candidate : this.metaData.functions.values()) {
        if (pruner.prune(candidate)) {
            doomed.add(candidate.getKey());
        }
    }
    if (doomed.isEmpty()) {
        return this;
    }
    PMetaDataCache prunedCache = metaData.clone();
    for (PTableKey doomedKey : doomed) {
        prunedCache.functions.remove(doomedKey);
    }
    return new PMetaDataImpl(prunedCache);
}
/**
 * Returns a new metadata snapshot with {@code columnsToAdd} appended to the named
 * table's columns and the table's properties updated. Returns {@code this} unchanged
 * if the table is not cached. Delegates to {@link #addTable} so parent/index
 * bookkeeping and size accounting are applied uniformly.
 *
 * @param tenantId tenant owning the table (null for global tables)
 * @param tableName name of the table to modify
 * @param columnsToAdd columns to append; may be empty to only update properties
 * @param resolvedTime resolved timestamp recorded in the cache entry
 * @return a new {@code PMetaDataImpl}, or {@code this} if the table was absent
 */
@Override
public PMetaData addColumn(
        PName tenantId,
        String tableName,
        List<PColumn> columnsToAdd,
        long tableTimeStamp,
        long tableSeqNum,
        boolean isImmutableRows,
        boolean isWalDisabled,
        boolean isMultitenant,
        boolean storeNulls,
        boolean isTransactional,
        long updateCacheFrequency,
        long resolvedTime)
        throws SQLException {
    PTableRef existingRef = metaData.get(new PTableKey(tenantId, tableName));
    if (existingRef == null) {
        return this;
    }
    List<PColumn> existingColumns = PTableImpl.getColumnsToClone(existingRef.getTable());
    List<PColumn> mergedColumns;
    if (columnsToAdd.isEmpty()) {
        // Nothing to append — reuse the existing column list as-is.
        mergedColumns = existingColumns;
    } else {
        mergedColumns =
            Lists.newArrayListWithExpectedSize(existingColumns.size() + columnsToAdd.size());
        mergedColumns.addAll(existingColumns);
        mergedColumns.addAll(columnsToAdd);
    }
    PTable updatedTable =
        PTableImpl.makePTable(
            existingRef.getTable(),
            tableTimeStamp,
            tableSeqNum,
            mergedColumns,
            isImmutableRows,
            isWalDisabled,
            isMultitenant,
            storeNulls,
            isTransactional,
            updateCacheFrequency);
    return addTable(updatedTable, resolvedTime);
}
/**
 * Iterates over all tables in the underlying cache, delegating directly to it.
 *
 * @return an iterator over the cached tables
 */
@Override
public Iterator<PTable> iterator() {
    final Iterator<PTable> cacheIterator = metaData.iterator();
    return cacheIterator;
}
/**
 * Returns a new metadata snapshot containing {@code table} (and its indexes). If the
 * table has a cached parent, the parent's index list is rebuilt to include this table.
 * Tracks the net byte change ({@code netGain}) so the cache can be cloned with LRU
 * eviction ({@code cloneMinusOverage}) when the addition would exceed its max size.
 */
@Override
public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
    int netGain = 0;
    PTableKey key = table.getKey();
    PTableRef oldTableRef = metaData.get(key);
    if (oldTableRef != null) {
        // Replacing an existing entry: its bytes will be released.
        netGain -= oldTableRef.getEstSize();
    }
    PTable newParentTable = null;
    // NOTE(review): parentResolvedTimestamp is never reassigned below, so the parent is
    // always recorded with the caller's resolvedTime — confirm that is intended.
    long parentResolvedTimestamp = resolvedTime;
    if (table.getParentName() != null) { // Upsert new index table into parent data table list
        String parentName = table.getParentName().getString();
        PTableRef oldParentRef = metaData.get(new PTableKey(table.getTenantId(), parentName));
        // If parentTable isn't cached, that's ok we can skip this
        if (oldParentRef != null) {
            List<PTable> oldIndexes = oldParentRef.getTable().getIndexes();
            List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size() + 1);
            newIndexes.addAll(oldIndexes);
            // Drop any stale entry for this index before re-adding the fresh one.
            for (int i = 0; i < newIndexes.size(); i++) {
                PTable index = newIndexes.get(i);
                if (index.getName().equals(table.getName())) {
                    newIndexes.remove(i);
                    break;
                }
            }
            newIndexes.add(table);
            // Parent is replaced wholesale: subtract its old size, add its rebuilt size.
            netGain -= oldParentRef.getEstSize();
            newParentTable =
                PTableImpl.makePTable(oldParentRef.getTable(), table.getTimeStamp(), newIndexes);
            netGain += newParentTable.getEstimatedSize();
        }
    }
    if (newParentTable == null) {
        // Don't count in gain if we found a parent table, as its accounted for in
        // newParentTable
        netGain += table.getEstimatedSize();
    }
    // Choose the clone strategy: plain clone if we still fit, LRU-evicting clone if not.
    long overage = metaData.getCurrentSize() + netGain - metaData.getMaxSize();
    PMetaDataCache newMetaData =
        overage <= 0 ? metaData.clone() : metaData.cloneMinusOverage(overage);
    if (newParentTable != null) { // Upsert new index table into parent data table list
        newMetaData.put(newParentTable.getKey(), newParentTable, parentResolvedTimestamp);
        // putDuplicate: the index's bytes are already accounted for inside the parent.
        newMetaData.putDuplicate(table.getKey(), table, resolvedTime);
    } else {
        newMetaData.put(table.getKey(), table, resolvedTime);
    }
    for (PTable index : table.getIndexes()) {
        newMetaData.putDuplicate(index.getKey(), index, resolvedTime);
    }
    return new PMetaDataImpl(newMetaData);
}
/**
 * Reports how many tables the underlying cache currently holds.
 *
 * @return the number of cached tables
 */
@Override
public int size() {
    final int cachedTableCount = metaData.size();
    return cachedTableCount;
}
/**
 * Private copy constructor: takes a defensive clone of the supplied cache so this
 * instance owns an independent snapshot, keeping the class effectively immutable.
 *
 * @param cacheToCopy the cache to snapshot
 */
private PMetaDataImpl(PMetaDataCache cacheToCopy) {
    this.metaData = cacheToCopy.clone();
}