/**
 * Copy the current partition into T.
 *
 * @param T the target partition object
 */
private void copy(Partition T) {
  if (T == null) {
    // Nothing to copy into: a Partition created here would be
    // invisible to the caller, so return instead.
    return;
  }
  System.arraycopy(Pt_x, 0, T.Pt_x, 0, Pt_x.length);
  System.arraycopy(Pt, 0, T.Pt, 0, Pt.length);
  T.L = L;
  T.counter = counter;
  double[][] mArray = Py_t.getArray();
  double[][] tgtArray = T.Py_t.getArray();
  for (int i = 0; i < mArray.length; i++) {
    System.arraycopy(mArray[i], 0, tgtArray[i], 0, mArray[i].length);
  }
}
private void addPartition(
    String databaseName, String tableName, CatalogProtos.PartitionDescProto partitionDescProto) {
  HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
  try {
    client = clientPool.getClient();

    Partition partition = new Partition();
    partition.setDbName(databaseName);
    partition.setTableName(tableName);

    List<String> values = Lists.newArrayList();
    for (CatalogProtos.PartitionKeyProto keyProto : partitionDescProto.getPartitionKeysList()) {
      values.add(keyProto.getPartitionValue());
    }
    partition.setValues(values);

    Table table = client.getHiveClient().getTable(databaseName, tableName);
    StorageDescriptor sd = table.getSd();
    sd.setLocation(partitionDescProto.getPath());
    partition.setSd(sd);

    client.getHiveClient().add_partition(partition);
  } catch (Exception e) {
    throw new TajoInternalError(e);
  } finally {
    if (client != null) {
      client.release();
    }
  }
}
@Override
public boolean hasNext() {
  if (next != null) {
    return true;
  }
  next = new ArrayList<Partition>();

  while (it.hasNext()) {
    Entry<Long, Interval> entry = it.next();
    Interval intv = entry.getValue();
    if (jumpToStart && (intv.getEnd() <= start)) {
      continue;
    } else {
      jumpToStart = false;
    }
    if (partitioningSpec.type == _type.TIME) {
      next.add(intv.partitions.values().iterator().next());
    } else {
      for (Partition p : intv.partitions.values()) {
        if ((partitionValueFilter == null) || (partitionValueFilter.contains(p.getValue()))) {
          next.add(p);
        }
      }
    }
    if (!next.isEmpty()) {
      break;
    }
  }

  if (next.isEmpty()) {
    next = null;
    return false;
  }
  return true;
}
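// Minimal usage sketch: drains the iterator batch by batch. This assumes
// hasNext() lives on an iterator-style class whose next() hands back the
// List<Partition> prepared above and resets the internal `next` field;
// `partitionIterator` and `process` below are hypothetical names.
while (partitionIterator.hasNext()) {
  for (Partition p : partitionIterator.next()) {
    process(p); // hypothetical per-partition callback
  }
}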
private long calculateAllocatedUsableCapacityInBytes() {
  long allocatedUsableCapacityInBytes = 0;
  for (Partition partition : partitionMap.values()) {
    allocatedUsableCapacityInBytes += partition.getReplicaCapacityInBytes();
  }
  return allocatedUsableCapacityInBytes;
}
/**
 * Load an out-of-core partition into memory, evicting the currently loaded
 * partition to disk first if necessary.
 *
 * @param partitionId Partition id
 */
private void loadPartition(Integer partitionId) {
  if (loadedPartition != null) {
    if (loadedPartition.getId() == partitionId) {
      return;
    }
    if (LOG.isInfoEnabled()) {
      LOG.info("loadPartition: moving partition " + loadedPartition.getId() + " out of core");
    }
    try {
      writePartition(loadedPartition);
      onDiskPartitions.put(loadedPartition.getId(), loadedPartition.getVertices().size());
      loadedPartition = null;
    } catch (IOException e) {
      throw new IllegalStateException(
          "loadPartition: failed writing partition " + loadedPartition.getId() + " to disk", e);
    }
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("loadPartition: loading partition " + partitionId + " in memory");
  }
  try {
    loadedPartition = readPartition(partitionId);
  } catch (IOException e) {
    throw new IllegalStateException(
        "loadPartition: failed reading partition " + partitionId + " from disk", e);
  }
}
/** @param args the command line arguments */
public static void main(String[] args) {
  int[] arr = {45, 54, 67, 56, 87, 89, 65, 35, 64, 4};
  Partition p = new Partition();
  p.printArray(arr);
  p.partition(arr, arr.length - 1, 6);
  p.printArray(arr);
}
/**
 * Adds a Partition to the partition map and validates that it is unique. A duplicate Partition
 * results in an exception.
 */
private void addPartition(Partition partition) {
  if (partitionMap.put(ByteBuffer.wrap(partition.getBytes()), partition) != null) {
    throw new IllegalStateException("Duplicate Partition detected: " + partition.toString());
  }
  if (partition.getId() >= maxPartitionId) {
    maxPartitionId = partition.getId() + 1;
  }
}
/**
 * Opens the existing lag partition if there is one, or creates a new one if the parent journal
 * is configured to have lag partitions.
 *
 * @return the lag partition instance, opened
 * @throws com.nfsdb.exceptions.JournalException if the partition cannot be opened or created
 */
public Partition<T> openOrCreateLagPartition() throws JournalException {
  Partition<T> result = getIrregularPartition();
  if (result == null) {
    result = createTempPartition();
    setIrregularPartition(result);
  }
  return result.open();
}
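// Usage sketch (hypothetical): obtain the lag partition and append to it.
// `journal` stands in for an open writer exposing openOrCreateLagPartition(),
// and Quote is a placeholder record type. The append(T) call on Partition is
// assumed from its use in the Journal append method further below.
Partition<Quote> lag = journal.openOrCreateLagPartition();
lag.append(quote);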
static void load_process(Process p) {
  // First fit: start execution in the first partition that can hold the process.
  for (Partition current_partition : partition) {
    if (current_partition.load(p)) {
      new Execute(p, current_partition).start();
      return;
    }
  }
  System.out.println("No memory available");
}
/**
 * Generates a clusterer.
 *
 * @param data the training instances
 * @throws Exception if something goes wrong
 */
@Override
public void buildClusterer(Instances data) throws Exception {
  // can clusterer handle the data?
  getCapabilities().testWithFail(data);

  m_replaceMissing = new ReplaceMissingValues();
  Instances instances = new Instances(data);
  instances.setClassIndex(-1);
  m_replaceMissing.setInputFormat(instances);
  data = weka.filters.Filter.useFilter(instances, m_replaceMissing);
  instances = null;

  // initialize all fields that are not being set via options
  m_data = data;
  m_numInstances = m_data.numInstances();
  m_numAttributes = m_data.numAttributes();
  random = new Random(getSeed());

  // initialize the statistics of the input training data
  input = sIB_ProcessInput();

  // object to hold the best partition
  bestT = new Partition();

  // the real clustering
  double bestL = Double.NEGATIVE_INFINITY;
  for (int k = 0; k < m_numRestarts; k++) {
    if (m_verbose) {
      System.out.format("restart number %s...\n", k);
    }

    // initialize the partition and optimize it
    Partition tmpT = sIB_InitT(input);
    tmpT = sIB_OptimizeT(tmpT, input);

    // if a better partition is found, save it
    if (tmpT.L > bestL) {
      tmpT.copy(bestT);
      bestL = bestT.L;
    }

    if (m_verbose) {
      System.out.println("\nPartition status : ");
      System.out.println("------------------");
      System.out.println(tmpT.toString() + "\n");
    }
  }

  if (m_verbose) {
    System.out.println("\nBest Partition");
    System.out.println("===============");
    System.out.println(bestT.toString());
  }

  // save memory
  m_data = new Instances(m_data, 0);
}
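// Usage sketch (hypothetical): build the clusterer on an ARFF data set. This
// assumes the enclosing class is Weka's sIB clusterer and that the code runs
// inside a method declared to throw Exception; "data.arff" is a placeholder path.
Instances data =
    new weka.core.converters.ConverterUtils.DataSource("data.arff").getDataSet();
sIB clusterer = new sIB();
clusterer.buildClusterer(data);
System.out.println(clusterer); // prints the toString() summary shown further below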
public long getPartitionInStateCount(PartitionState partitionState) {
  int count = 0;
  for (Partition partition : partitionMap.values()) {
    if (partition.getPartitionState() == partitionState) {
      count++;
    }
  }
  return count;
}
@Override
public void addPartition(Partition<I, V, E, M> partition) {
  // createLock is expected to return a lock already held by this thread,
  // or null if the partition (and its lock) already exists.
  Lock lock = createLock(partition.getId());
  if (lock == null) {
    throw new IllegalStateException(
        "addPartition: partition " + partition.getId() + " already exists");
  }
  addPartitionNoLock(partition);
  lock.unlock();
}
public JSONObject toJSONObject() throws JSONException {
  JSONObject jsonObject =
      new JSONObject()
          .put("clusterName", hardwareLayout.getClusterName())
          .put("version", version)
          .put("partitions", new JSONArray());
  for (Partition partition : partitionMap.values()) {
    jsonObject.accumulate("partitions", partition.toJSONObject());
  }
  return jsonObject;
}
@Override
public void addPartition(Partition<I, V, E> partition) {
  Partition<I, V, E> oldPartition = partitions.get(partition.getId());
  if (oldPartition == null) {
    oldPartition = partitions.putIfAbsent(partition.getId(), partition);
    if (oldPartition == null) {
      // No previous value: this partition is now registered.
      return;
    }
  }
  // A partition with the same id already exists (or was inserted
  // concurrently); merge the new one into it.
  oldPartition.addPartition(partition);
}
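// The get/putIfAbsent dance above is the standard lock-free insert-or-merge
// idiom for a ConcurrentMap. A generic sketch with hypothetical names (uses
// java.util.concurrent.ConcurrentMap and java.util.function.BiConsumer): try a
// cheap get first, fall back to putIfAbsent, and merge only if another thread
// won the race.
static <K, V> void insertOrMerge(ConcurrentMap<K, V> map, K key, V value, BiConsumer<V, V> merge) {
  V existing = map.get(key);
  if (existing == null) {
    existing = map.putIfAbsent(key, value);
    if (existing == null) {
      return; // we won the race; value is registered
    }
  }
  merge.accept(existing, value); // merge value into the surviving entry
}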
public static Properties getHiveSchema(Partition partition, Table table) {
  // Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table)
  return getHiveSchema(
      partition.getStorage(),
      partition.getColumns(),
      table.getDataColumns(),
      table.getParameters(),
      table.getDatabaseName(),
      table.getTableName(),
      table.getPartitionColumns());
}
/**
 * Write a partition to disk.
 *
 * @param partition The partition object to write
 * @throws java.io.IOException if the partition file cannot be written
 */
private void writePartition(Partition<I, V, E, M> partition) throws IOException {
  File file = new File(getPartitionPath(partition.getId()));
  file.getParentFile().mkdirs();
  file.createNewFile();
  DataOutputStream outputStream =
      new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file)));
  try {
    for (Vertex<I, V, E, M> vertex : partition.getVertices()) {
      vertex.write(outputStream);
    }
  } finally {
    outputStream.close();
  }
}
protected void validatePartitionIds() {
  for (Partition partition : partitionMap.values()) {
    long partitionId = partition.getId();
    if (partitionId < MinPartitionId) {
      throw new IllegalStateException("Partition has invalid ID: Less than " + MinPartitionId);
    }
    if (partitionId >= maxPartitionId) {
      throw new IllegalStateException(
          "Partition has invalid ID: Greater than or equal to " + maxPartitionId);
    }
  }
}
private void rollback0(long address, boolean writeDiscard) throws JournalException {
  if (address == -1L) {
    notifyTxError();
    throw new IncompatibleJournalException(
        "Server txn is not compatible with %s", this.getLocation());
  }

  txLog.read(address, tx);

  if (tx.address == 0) {
    throw new JournalException("Invalid transaction address");
  }

  if (writeDiscard) {
    LOGGER.info(
        "Journal %s is rolling back to transaction #%d, timestamp %s",
        metadata.getLocation(), tx.txn, Dates.toString(tx.timestamp));
    writeDiscardFile(tx.journalMaxRowID);
  }

  // partitions need to be dealt with first to make sure new lag is assigned a correct
  // partitionIndex
  rollbackPartitions(tx);

  Partition<T> lag = getIrregularPartition();
  if (tx.lagName != null
      && tx.lagName.length() > 0
      && (lag == null || !tx.lagName.equals(lag.getName()))) {
    Partition<T> newLag = createTempPartition(tx.lagName);
    setIrregularPartition(newLag);
    newLag.applyTx(tx.lagSize, tx.lagIndexPointers);
  } else if (lag != null && tx.lagName == null) {
    removeIrregularPartitionInternal();
  } else if (lag != null) {
    lag.truncate(tx.lagSize);
  }

  if (tx.symbolTableSizes.length == 0) {
    for (int i = 0, sz = getSymbolTableCount(); i < sz; i++) {
      getSymbolTable(i).truncate();
    }
  } else {
    for (int i = 0, sz = getSymbolTableCount(); i < sz; i++) {
      getSymbolTable(i).truncate(tx.symbolTableSizes[i]);
    }
  }

  appendTimestampLo = -1;
  appendTimestampHi = -1;
  appendPartition = null;
  txLog.writeTxAddress(tx.address);
  txActive = false;
}
protected void validateUniqueness() {
  // Validate uniqueness of each logical component. Partition uniqueness is validated by method
  // addPartition.
  Set<Replica> replicaSet = new HashSet<Replica>();
  for (Partition partition : partitionMap.values()) {
    for (Replica replica : partition.getReplicas()) {
      if (!replicaSet.add(replica)) {
        throw new IllegalStateException("Duplicate Replica detected: " + replica.toString());
      }
    }
  }
}
@Override
public Map<EntityID, Integer> computeNeededAgents(List<Partition> partitions, int agents) {
  Map<EntityID, Integer> neededAgentMap = new FastMap<EntityID, Integer>();
  int numberOfPartitions = 0;
  for (Partition partition : partitions) {
    numberOfPartitions++;
    // The first `agents` partitions each get one agent; the rest get none.
    neededAgentMap.put(partition.getId(), numberOfPartitions <= agents ? 1 : 0);
  }
  return neededAgentMap;
}
// Creates a Partition and corresponding Replicas for each specified disk
public Partition addNewPartition(List<Disk> disks, long replicaCapacityInBytes) {
  if (disks == null || disks.isEmpty()) {
    throw new IllegalArgumentException("Disks either null or of zero length.");
  }
  Partition partition =
      new Partition(getNewPartitionId(), PartitionState.READ_WRITE, replicaCapacityInBytes);
  for (Disk disk : disks) {
    partition.addReplica(new Replica(partition, disk));
  }
  addPartition(partition);
  validate();
  return partition;
}
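// Usage sketch (hypothetical): carve a new partition with 100 GiB replicas out
// of three disks chosen elsewhere. `layout` stands in for whatever object owns
// addNewPartition(), and selectDisks is a hypothetical disk-selection helper.
List<Disk> disks = selectDisks(3);
Partition partition = layout.addNewPartition(disks, 100L * 1024 * 1024 * 1024);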
/**
 * Read a partition from disk.
 *
 * @param partitionId Id of the partition to read
 * @return The partition object
 * @throws IOException if the partition file cannot be read
 */
private Partition<I, V, E, M> readPartition(Integer partitionId) throws IOException {
  Partition<I, V, E, M> partition = new Partition<I, V, E, M>(conf, partitionId);
  File file = new File(getPartitionPath(partitionId));
  DataInputStream inputStream =
      new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
  try {
    int numVertices = onDiskPartitions.get(partitionId);
    for (int i = 0; i < numVertices; ++i) {
      Vertex<I, V, E, M> vertex = conf.createVertex();
      vertex.readFields(inputStream);
      partition.putVertex(vertex);
    }
  } finally {
    inputStream.close();
  }
  file.delete();
  return partition;
}
/**
 * Initialize the partition.
 *
 * @param input object holding the statistics of the training data
 * @return the initialized partition
 */
private Partition sIB_InitT(Input input) {
  Partition T = new Partition();
  int avgSize = (int) Math.ceil((double) m_numInstances / m_numCluster);

  // Build a random permutation of the instance indices.
  ArrayList<Integer> permInstsIdx = new ArrayList<Integer>();
  ArrayList<Integer> unassigned = new ArrayList<Integer>();
  for (int i = 0; i < m_numInstances; i++) {
    unassigned.add(i);
  }
  while (!unassigned.isEmpty()) {
    int t = random.nextInt(unassigned.size());
    permInstsIdx.add(unassigned.get(t));
    unassigned.remove(t);
  }

  // Deal the permuted instances out to the clusters, avgSize at a time.
  for (int i = 0; i < m_numCluster; i++) {
    int r2 = Math.min(avgSize, permInstsIdx.size());
    for (int j = 0; j < r2; j++) {
      T.Pt_x[permInstsIdx.get(j)] = i;
    }
    for (int j = 0; j < r2; j++) {
      permInstsIdx.remove(0);
    }
  }

  // initialize the prior prob of each cluster, and the probability
  // for each attribute within the cluster
  for (int i = 0; i < m_numCluster; i++) {
    ArrayList<Integer> indices = T.find(i);
    for (int j = 0; j < indices.size(); j++) {
      T.Pt[i] += input.Px[indices.get(j)];
    }
    double[][] mArray = input.Pyx.getArray();
    for (int j = 0; j < m_numAttributes; j++) {
      double sum = 0.0;
      for (int k = 0; k < indices.size(); k++) {
        sum += mArray[j][indices.get(k)];
      }
      sum /= T.Pt[i];
      T.Py_t.set(j, i, sum);
    }
  }
  if (m_verbose) {
    System.out.println("Initializing...");
  }
  return T;
}
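// Worked example of the dealing loop above: with m_numInstances = 10 and
// m_numCluster = 3, avgSize = ceil(10 / 3) = 4, so the permuted instances are
// dealt out as 4, 4, and finally 2 (the remainder) across the three clusters.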
@Override
public String toString() {
  StringBuilder text = new StringBuilder();
  text.append("\nsIB\n===\n");
  text.append("\nNumber of clusters: " + m_numCluster + "\n");
  for (int j = 0; j < m_numCluster; j++) {
    text.append(
        "\nCluster: "
            + j
            + " Size : "
            + bestT.size(j)
            + " Prior probability: "
            + Utils.doubleToString(bestT.Pt[j], 4)
            + "\n\n");
    for (int i = 0; i < m_numAttributes; i++) {
      text.append("Attribute: " + m_data.attribute(i).name() + "\n");
      text.append(
          "Probability given the cluster = "
              + Utils.doubleToString(bestT.Py_t.get(i, j), 4)
              + "\n");
    }
  }
  return text.toString();
}
public Partition<T> getAppendPartition(long timestamp) throws JournalException {
  int sz = partitions.size();
  if (sz > 0) {
    Partition<T> par = partitions.getQuick(sz - 1);
    Interval interval = par.getInterval();
    if (interval == null || interval.contains(timestamp)) {
      return par.open().access();
    } else if (interval.isBefore(timestamp)) {
      return createPartition(new Interval(timestamp, getMetadata().getPartitionType()), sz);
    } else {
      throw new JournalException("%s cannot be appended to %s", Dates.toString(timestamp), this);
    }
  } else {
    return createPartition(new Interval(timestamp, getMetadata().getPartitionType()), 0);
  }
}
/**
 * Add an object to the end of the Journal.
 *
 * @param obj the object to add
 * @throws com.nfsdb.exceptions.JournalException if there is an error
 */
public void append(T obj) throws JournalException {
  if (obj == null) {
    throw new JournalException("Cannot append NULL to %s", this);
  }

  if (!txActive) {
    beginTx();
  }

  if (checkOrder) {
    long timestamp = getTimestamp(obj);

    if (timestamp > appendTimestampHi) {
      switchAppendPartition(timestamp);
    }

    if (timestamp < appendTimestampLo) {
      throw new JournalException(
          "Cannot insert records out of order. maxHardTimestamp=%d (%s), timestamp=%d (%s): %s",
          appendTimestampLo,
          Dates.toString(appendTimestampLo),
          timestamp,
          Dates.toString(timestamp),
          this);
    }

    appendPartition.append(obj);
    appendTimestampLo = timestamp;
  } else {
    getAppendPartition().append(obj);
  }
}
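// Usage sketch (hypothetical): appends run inside the transaction opened by
// beginTx() above. `writer` stands in for the journal writer owning append(T),
// Quote is a placeholder record type, and commit() is assumed to exist as the
// counterpart of beginTx().
writer.append(new Quote("AAPL", 172.5, System.currentTimeMillis()));
writer.commit();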
/**
 * Draw an instance out of a cluster.
 *
 * @param instIdx index of the instance to be drawn out
 * @param t index of the cluster the instance previously belonged to
 * @param T the current working partition
 * @param input the input statistics
 */
private void reduce_x(int instIdx, int t, Partition T, Input input) {
  // Update the prior probability of the cluster
  ArrayList<Integer> indices = T.find(t);
  double sum = 0.0;
  for (int i = 0; i < indices.size(); i++) {
    if (indices.get(i) == instIdx) {
      continue;
    }
    sum += input.Px[indices.get(i)];
  }
  T.Pt[t] = sum;

  if (T.Pt[t] < 0) {
    System.out.format("Warning: probability < 0 (%s)\n", T.Pt[t]);
    T.Pt[t] = 0;
  }

  // Update the probability of each attribute in the cluster
  double[][] mArray = input.Pyx.getArray();
  for (int i = 0; i < m_numAttributes; i++) {
    sum = 0.0;
    for (int j = 0; j < indices.size(); j++) {
      if (indices.get(j) == instIdx) {
        continue;
      }
      sum += mArray[i][indices.get(j)];
    }
    T.Py_t.set(i, t, sum / T.Pt[t]);
  }
}
/**
 * Add a new partition without requiring a lock.
 *
 * @param partition Partition to be added
 */
private void addPartitionNoLock(Partition<I, V, E, M> partition) {
  synchronized (inMemoryPartitions) {
    if (inMemoryPartitions.size() + 1 < maxInMemoryPartitions) {
      inMemoryPartitions.put(partition.getId(), partition);
      return;
    }
  }
  // No room in memory: spill the new partition straight to disk.
  try {
    writePartition(partition);
    onDiskPartitions.put(partition.getId(), partition.getVertices().size());
  } catch (IOException e) {
    throw new IllegalStateException(
        "addPartition: failed writing partition " + partition.getId() + " to disk", e);
  }
}
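// Taken together with writePartition, readPartition, and loadPartition above,
// this forms a simple out-of-core partition store. A hedged sketch of the read
// path (getPartition is a hypothetical accessor; the fields are those used in
// the methods above): serve from memory if resident, otherwise swap in from disk.
public Partition<I, V, E, M> getPartition(Integer partitionId) {
  synchronized (inMemoryPartitions) {
    if (inMemoryPartitions.containsKey(partitionId)) {
      return inMemoryPartitions.get(partitionId);
    }
  }
  loadPartition(partitionId); // evicts the resident partition and reads this one
  return loadedPartition;
}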
/** @see ConnectionFactory#getLeader(Partition) */
@Override
public BrokerAddress getLeader(Partition partition) {
  BrokerAddress leader = null;
  this.lock.readLock().lock();
  try {
    leader = getMetadataCache().getLeader(partition);
  } finally {
    this.lock.readLock().unlock();
  }
  if (leader == null) {
    this.lock.writeLock().lock();
    try {
      // double-checked locking: another thread may have refreshed the
      // metadata while we were waiting for the write lock
      leader = getMetadataCache().getLeader(partition);
      if (leader == null) {
        refreshMetadata(Collections.singleton(partition.getTopic()));
        leader = getMetadataCache().getLeader(partition);
      }
    } finally {
      this.lock.writeLock().unlock();
    }
  }
  if (leader == null) {
    throw new PartitionNotFoundException(partition);
  }
  return leader;
}
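// Generic sketch of the read-then-write double-check used above, with
// hypothetical names (rwLock is a ReentrantReadWriteLock, cache a Map, and
// loadFromSource an expensive refresh): readers take the cheap read lock, and
// only a miss escalates to the write lock, where the lookup is repeated first.
V cachedLookup(K key) {
  rwLock.readLock().lock();
  try {
    V v = cache.get(key);
    if (v != null) {
      return v;
    }
  } finally {
    rwLock.readLock().unlock();
  }
  rwLock.writeLock().lock();
  try {
    V v = cache.get(key); // re-check: another writer may have filled it
    if (v == null) {
      v = loadFromSource(key);
      cache.put(key, v);
    }
    return v;
  } finally {
    rwLock.writeLock().unlock();
  }
}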
public static Partition fromMetastoreApiPartition(
    org.apache.hadoop.hive.metastore.api.Partition partition) {
  StorageDescriptor storageDescriptor = partition.getSd();
  if (storageDescriptor == null) {
    throw new PrestoException(
        HIVE_INVALID_METADATA, "Partition does not contain a storage descriptor: " + partition);
  }

  Partition.Builder partitionBuilder =
      Partition.builder()
          .setDatabaseName(partition.getDbName())
          .setTableName(partition.getTableName())
          .setValues(partition.getValues())
          .setColumns(
              storageDescriptor
                  .getCols()
                  .stream()
                  .map(MetastoreUtil::fromMetastoreApiFieldSchema)
                  .collect(toList()))
          .setParameters(partition.getParameters());

  fromMetastoreApiStorageDescriptor(
      storageDescriptor,
      partitionBuilder.getStorageBuilder(),
      format("%s.%s", partition.getTableName(), partition.getValues()));

  return partitionBuilder.build();
}
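// Usage sketch (hypothetical): convert Thrift metastore partitions into the
// internal representation. `client` and its getPartitionsByNames call stand in
// for whatever metastore access layer is in use; only the map step is grounded
// in the method above.
List<Partition> partitions =
    client.getPartitionsByNames(databaseName, tableName, partitionNames)
        .stream()
        .map(MetastoreUtil::fromMetastoreApiPartition)
        .collect(toList());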