Example #1
  /**
   * Should only be called by ColumnFamilyStore.apply via Keyspace.apply, which supplies the
   * appropriate OpOrdering.
   *
   * <p>replayPosition should only be null if this is a secondary index, in which case it is
   * *expected* to be null
   */
  long put(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup) {
    AtomicBTreePartition previous = partitions.get(update.partitionKey());

    long initialSize = 0;
    if (previous == null) {
      final DecoratedKey cloneKey = allocator.clone(update.partitionKey(), opGroup);
      AtomicBTreePartition empty = new AtomicBTreePartition(cfs.metadata, cloneKey, allocator);
      // We'll add the columns later. This avoids wasted work if we get beaten in the putIfAbsent
      previous = partitions.putIfAbsent(cloneKey, empty);
      if (previous == null) {
        previous = empty;
        // allocate the row overhead after the fact; this saves over-allocating and having to
        // free afterwards, but means we can overshoot our declared limit.
        int overhead = (int) (cloneKey.getToken().getHeapSize() + ROW_OVERHEAD_HEAP_SIZE);
        allocator.onHeap().allocate(overhead, opGroup);
        initialSize = 8;
      } else {
        allocator.reclaimer().reclaimImmediately(cloneKey);
      }
    }

    long[] pair = previous.addAllWithSizeDelta(update, opGroup, indexer);
    minTimestamp = Math.min(minTimestamp, previous.stats().minTimestamp);
    liveDataSize.addAndGet(initialSize + pair[0]);
    columnsCollector.update(update.columns());
    statsCollector.update(update.stats());
    currentOperations.addAndGet(update.operationCount());
    return pair[1];
  }
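The method above uses a common optimistic pattern: publish a cheap empty placeholder with putIfAbsent, and only pay the heavier allocation if the calling thread actually won the race. A minimal stand-alone sketch of that pattern (illustrative names, not Cassandra's API):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;
  import java.util.concurrent.atomic.AtomicLong;

  class PartitionCounters {
    private final ConcurrentMap<String, AtomicLong> partitions = new ConcurrentHashMap<>();

    AtomicLong getOrCreate(String key) {
      AtomicLong existing = partitions.get(key);
      if (existing != null) return existing;  // fast path: already present
      AtomicLong empty = new AtomicLong();    // cheap placeholder
      existing = partitions.putIfAbsent(key, empty);
      // null means this thread won and may now do any expensive setup;
      // otherwise discard the placeholder and use the winner's value.
      return existing == null ? empty : existing;
    }
  }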
Example #2
  private void performWrites() {
    final MeterInternalCallContext context = new MeterInternalCallContext();

    // This is the atomic operation: bulk insert the new aggregated TimelineChunk objects, and
    // delete or invalidate the ones that were aggregated. This should be very fast.
    final long startWriteTime = System.currentTimeMillis();
    aggregatorSqlDao.begin();
    timelineDao.bulkInsertTimelineChunks(chunksToWrite, context);
    if (config.getDeleteAggregatedChunks()) {
      aggregatorSqlDao.deleteTimelineChunks(chunkIdsToInvalidateOrDelete, context);
    } else {
      aggregatorSqlDao.makeTimelineChunksInvalid(chunkIdsToInvalidateOrDelete, context);
    }
    aggregatorSqlDao.commit();
    msWritingDb.addAndGet(System.currentTimeMillis() - startWriteTime);

    timelineChunksWritten.addAndGet(chunksToWrite.size());
    timelineChunksInvalidatedOrDeleted.addAndGet(chunkIdsToInvalidateOrDelete.size());
    chunksToWrite.clear();
    chunkIdsToInvalidateOrDelete.clear();
    final long sleepMs = config.getAggregationSleepBetweenBatches().getMillis();
    if (sleepMs > 0) {
      final long timeBeforeSleep = System.currentTimeMillis();
      try {
        Thread.sleep(sleepMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      msSpentSleeping.addAndGet(System.currentTimeMillis() - timeBeforeSleep);
    }
    timelineChunkBatchesProcessed.incrementAndGet();
  }
Example #3
  private void performMerge(List<StoreFileScanner> scanners, HStore store, StoreFile.Writer writer)
      throws IOException {
    InternalScanner scanner = null;
    try {
      Scan scan = new Scan();

      // Include deletes
      scanner =
          new StoreScanner(
              store,
              store.scanInfo,
              scan,
              scanners,
              ScanType.MAJOR_COMPACT,
              Long.MIN_VALUE,
              Long.MIN_VALUE);

      ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();

      while (scanner.next(kvs) || kvs.size() != 0) {
        numKV.addAndGet(kvs.size());
        for (KeyValue kv : kvs) {
          totalBytes.addAndGet(kv.getLength());
          writer.append(kv);
        }
        kvs.clear();
      }
    } finally {
      if (scanner != null) scanner.close();
    }
  }
Example #4
 /** addAndGet adds the given value to the current value and returns the updated value */
 public void testAddAndGet() {
   AtomicLong ai = new AtomicLong(1);
   assertEquals(3, ai.addAndGet(2));
   assertEquals(3, ai.get());
   assertEquals(-1, ai.addAndGet(-4));
   assertEquals(-1, ai.get());
 }
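For contrast with the test above: getAndAdd returns the previous value, while addAndGet returns the updated one.

  AtomicLong ai = new AtomicLong(1);
  long updated = ai.addAndGet(2);  // 3: the value after the addition
  long previous = ai.getAndAdd(2); // 3: the value before this addition; ai.get() is now 5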
Example #5
  synchronized boolean put(KeyBuffer keyBuffer, byte[] data, boolean ifAbsent, byte[] old) {
    long sz = sizeOf(keyBuffer, data);
    while (freeCapacity.get() < sz)
      if (!evictOne()) {
        remove(keyBuffer);
        return false;
      }

    byte[] existing = map.get(keyBuffer);
    if (ifAbsent || old != null) {
      if (ifAbsent && existing != null) return false;
      if (old != null && existing != null && !Arrays.equals(old, existing)) return false;
    }

    map.put(keyBuffer, data);
    lru.remove(keyBuffer);
    lru.addFirst(keyBuffer);

    if (existing != null) {
      freeCapacity.addAndGet(sizeOf(keyBuffer, existing));
      putReplaceCount++;
    } else putAddCount++;

    freeCapacity.addAndGet(-sz);

    return true;
  }
Example #6
    @Override
    public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
        throws Exception {
      try (RecoveriesCollection.RecoveryRef recoveryRef =
          onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final RecoveryTarget status = recoveryRef.status();
        final RecoveryState.Index indexState = status.state().getIndex();
        if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
          indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
        }

        RateLimiter rateLimiter = recoverySettings.rateLimiter();
        if (rateLimiter != null) {
          long bytes = bytesSinceLastPause.addAndGet(request.content().length());
          if (bytes > rateLimiter.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            long throttleTimeInNanos = rateLimiter.pause(bytes);
            indexState.addTargetThrottling(throttleTimeInNanos);
            status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
          }
        }

        status.writeFileChunk(
            request.metadata(),
            request.position(),
            request.content(),
            request.lastChunk(),
            request.totalTranslogOps());
      }
      channel.sendResponse(TransportResponse.Empty.INSTANCE);
    }
Example #7
 private void reserveSpace(long size, boolean committed) {
   Preconditions.checkState(
       size <= mAvailableBytes.get(), "Available bytes should always be non-negative ");
   mAvailableBytes.addAndGet(-size);
   if (committed) {
     mCommittedBytes.addAndGet(size);
   }
 }
Example #8
 private void reclaimSpace(long size, boolean committed) {
   Preconditions.checkState(
       mCapacityBytes >= mAvailableBytes.get() + size,
       "Available bytes should always be less than total capacity bytes");
   mAvailableBytes.addAndGet(size);
   if (committed) {
     mCommittedBytes.addAndGet(-size);
   }
 }
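Taken together, Examples #7 and #8 maintain the invariant 0 <= mAvailableBytes <= mCapacityBytes. A hedged sketch of one plausible caller pairing the two (writeBlock and blockSize are hypothetical stand-ins); on success the space stays reserved until the block is later freed:

  reserveSpace(blockSize, true);    // take capacity and count it as committed
  try {
    writeBlock();                   // hypothetical I/O that uses the space
  } catch (IOException e) {
    reclaimSpace(blockSize, true);  // roll back the reservation on failure
    throw e;
  }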
Example #9
  private void resolve(DecoratedKey key, ColumnFamily cf) {
    currentThroughput.addAndGet(cf.size());
    currentOperations.addAndGet(
        (cf.getColumnCount() == 0) ? cf.isMarkedForDelete() ? 1 : 0 : cf.getColumnCount());

    ColumnFamily oldCf = columnFamilies.putIfAbsent(key, cf);
    if (oldCf == null) return;

    oldCf.resolve(cf);
  }
Example #10
 /**
  * Try to reserve memory needed for task execution and return true if succeeded. Tasks have a
  * shared pool of memory which they should ask for in advance before they even try to allocate it.
  *
  * <p>This method is another backpressure mechanism to make sure we do not exhaust system's
  * resources by running too many tasks at the same time. Tasks are expected to reserve memory
  * before proceeding with their execution and to release it when done.
  *
  * @param m - requested number of bytes
  * @return true if there is enough free memory
  */
 public static boolean tryReserveTaskMem(long m) {
   if (!CAN_ALLOC) return false;
   assert m >= 0 : "m < 0: " + m;
   long current = _taskMem.addAndGet(-m);
   if (current < 0) {
     current = _taskMem.addAndGet(m);
     return false;
   }
   return true;
 }
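A minimal usage sketch of the reserve/free pair (freeTaskMem appears in Example #20 below); estimateTaskMemory and runTask are hypothetical stand-ins:

  long bytes = estimateTaskMemory();  // hypothetical: caller's own estimate
  if (!tryReserveTaskMem(bytes)) {
    return;                           // back off instead of exhausting memory
  }
  try {
    runTask();                        // hypothetical task body
  } finally {
    freeTaskMem(bytes);               // always release the reservation when done
  }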
Example #11
  @Override
  public void execute(Tuple input) {

    tpsCounter.count();

    Long tupleId = input.getLong(0);
    Pair pair = (Pair) input.getValue(1);

    Pair trade = null;
    Pair customer = null;

    Tuple tradeTuple = null;
    Tuple customerTuple = null;

    if (input.getSourceComponent().equals(SequenceTopologyDef.CUSTOMER_BOLT_NAME)) {
      customer = pair;
      customerTuple = input;

      tradeTuple = tradeMap.remove(tupleId);
      if (tradeTuple == null) {
        customerMap.put(tupleId, input);
        return;
      }

      trade = (Pair) tradeTuple.getValue(1);

    } else if (input.getSourceComponent().equals(SequenceTopologyDef.TRADE_BOLT_NAME)) {
      trade = pair;
      tradeTuple = input;

      customerTuple = customerMap.remove(tupleId);
      if (customerTuple == null) {
        tradeMap.put(tupleId, input);
        return;
      }

      customer = (Pair) customerTuple.getValue(1);
    } else {
      LOG.info("Unknow source component: " + input.getSourceComponent());
      collector.fail(input);
      return;
    }

    tradeSum.addAndGet(trade.getValue());
    customerSum.addAndGet(customer.getValue());

    collector.ack(tradeTuple);
    collector.ack(customerTuple);

    TradeCustomer tradeCustomer = new TradeCustomer();
    tradeCustomer.setTrade(trade);
    tradeCustomer.setCustomer(customer);
    collector.emit(new Values(tupleId, tradeCustomer));
  }
Example #12
  @Override
  public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
    try (ResourceHolder<MemcachedClientIF> clientHolder = client.get()) {
      Map<String, NamedKey> keyLookup =
          Maps.uniqueIndex(
              keys,
              new Function<NamedKey, String>() {
                @Override
                public String apply(@Nullable NamedKey input) {
                  return computeKeyHash(memcachedPrefix, input);
                }
              });

      Map<NamedKey, byte[]> results = Maps.newHashMap();

      BulkFuture<Map<String, Object>> future;
      try {
        future = clientHolder.get().asyncGetBulk(keyLookup.keySet());
      } catch (IllegalStateException e) {
        // operation did not get queued in time (queue is full)
        errorCount.incrementAndGet();
        log.warn(e, "Unable to queue cache operation");
        return results;
      }

      try {
        Map<String, Object> some = future.getSome(timeout, TimeUnit.MILLISECONDS);

        if (future.isTimeout()) {
          future.cancel(false);
          timeoutCount.incrementAndGet();
        }
        missCount.addAndGet(keyLookup.size() - some.size());
        hitCount.addAndGet(some.size());

        for (Map.Entry<String, Object> entry : some.entrySet()) {
          final NamedKey key = keyLookup.get(entry.getKey());
          final byte[] value = (byte[]) entry.getValue();
          if (value != null) {
            results.put(key, deserializeValue(key, value));
          }
        }

        return results;
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
      } catch (ExecutionException e) {
        errorCount.incrementAndGet();
        log.warn(e, "Exception pulling item from cache");
        return results;
      }
    }
  }
Example #13
 @Override
 public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     final Store store = recoveryStatus.store();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
     final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
     if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
       indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
     }
     IndexOutput indexOutput;
     if (request.position() == 0) {
       indexOutput =
           recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
     } else {
       indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
     }
     BytesReference content = request.content();
     if (!content.hasArray()) {
       content = content.toBytesArray();
     }
     RateLimiter rl = recoverySettings.rateLimiter();
     if (rl != null) {
       long bytes = bytesSinceLastPause.addAndGet(content.length());
       if (bytes > rl.getMinPauseCheckBytes()) {
         // Time to pause
         bytesSinceLastPause.addAndGet(-bytes);
         long throttleTimeInNanos = rl.pause(bytes);
         indexState.addTargetThrottling(throttleTimeInNanos);
         recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
       }
     }
     indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
     indexState.addRecoveredBytesToFile(request.name(), content.length());
     if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
       try {
         Store.verify(indexOutput);
       } finally {
         // we are done
         indexOutput.close();
       }
       // write the checksum
       recoveryStatus.legacyChecksums().add(request.metadata());
       final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
       assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
       store.directory().sync(Collections.singleton(temporaryFileName));
       IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
       assert remove == null || remove == indexOutput; // remove maybe null if we got finished
     }
   }
   channel.sendResponse(TransportResponse.Empty.INSTANCE);
 }
Example #14
File: HFile.java Project: joshua-g/c5
 public static final void offerReadLatency(long latencyNanos, boolean pread) {
   if (pread) {
     fsPreadLatenciesNanos.offer(latencyNanos); // might be silently dropped, if the queue is full
     preadOps.incrementAndGet();
     preadTimeNano.addAndGet(latencyNanos);
   } else {
     fsReadLatenciesNanos.offer(latencyNanos); // might be silently dropped, if the queue is full
     readTimeNano.addAndGet(latencyNanos);
     readOps.incrementAndGet();
   }
 }
Example #15
  public void update(long val) {

    if (val < min.get()) {
      min.set(val);
    }

    if (val > max.get()) {
      max.set(val);
    }

    sum.addAndGet(val);

    count.addAndGet(1);
  }
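Note that the check-then-set on min and max above is not atomic, so two racing updates can lose a minimum or maximum. Since Java 8 the same updates can be written losslessly, as in this sketch:

  min.accumulateAndGet(val, Math::min); // CAS loop: retries until the update wins
  max.accumulateAndGet(val, Math::max);
  sum.addAndGet(val);
  count.addAndGet(1);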
Example #16
 protected ServerMessage dequeue() {
   ServerMessage m = mq.poll();
   if (m != null) {
     queueOut.addAndGet(1);
   }
   return m;
 }
Example #17
  public long removeMessage(final List<MessageExt> msgs) {
    long result = -1;
    final long now = System.currentTimeMillis();
    try {
      this.lockTreeMap.writeLock().lockInterruptibly();
      this.lastConsumeTimestamp = now;
      try {
        if (!msgTreeMap.isEmpty()) {
          result = this.queueOffsetMax + 1;
          int removedCnt = 0;
          for (MessageExt msg : msgs) {
            MessageExt prev = msgTreeMap.remove(msg.getQueueOffset());
            if (prev != null) {
              removedCnt--; // one -1 per removed message; applied below as a negative delta to msgCount
            }
          }
          msgCount.addAndGet(removedCnt);

          if (!msgTreeMap.isEmpty()) {
            result = msgTreeMap.firstKey();
          }
        }
      } finally {
        this.lockTreeMap.writeLock().unlock();
      }
    } catch (Throwable t) {
      log.error("removeMessage exception", t);
    }

    return result;
  }
Example #18
 @Override
 public void updateWordsOccurencies() {
   totalWordOccurrences.set(0);
   for (VocabWord word : vocabWords()) {
     totalWordOccurrences.addAndGet((long) word.getElementFrequency());
   }
 }
Example #19
  @Override
  public final void run() {
    try {
      ctx.startSignal.await();
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }

    counter.incrementAndGet();

    long startTime = 0;
    if (MEASURE_TASK_TIME) {
      startTime = System.nanoTime();
    }

    FullTextSession s = Search.getFullTextSession(ctx.sf.openSession());
    Transaction tx = s.beginTransaction();
    try {
      execute(s);
      tx.commit();
    } catch (RuntimeException e) {
      tx.rollback();
      throw e;
    } finally {
      s.close();
    }

    if (MEASURE_TASK_TIME) {
      long stopTime = System.nanoTime();
      timer.addAndGet(stopTime - startTime);
    }
  }
Example #20
 /**
  * Free memory successfully reserved by a task.
  *
  * @param m number of bytes to return to the shared pool
  */
 public static void freeTaskMem(long m) {
   if (m == 0) return;
   _taskMem.addAndGet(m);
   synchronized (_taskMemLock) {
     _taskMemLock.notifyAll();
   }
 }
Example #21
  /**
   * Send all fragments as separate messages (with the same ID!). Example:
   *
   * <pre>
   * Given the generated ID is 2344, number of fragments=3, message {dst,src,buf}
   * would be fragmented into:
   *
   * [2344,3,0]{dst,src,buf1},
   * [2344,3,1]{dst,src,buf2} and
   * [2344,3,2]{dst,src,buf3}
   * </pre>
   */
  private void fragment(Message msg) {
    try {
      byte[] buffer = msg.getRawBuffer();
      List<Range> fragments = Util.computeFragOffsets(msg.getOffset(), msg.getLength(), frag_size);
      int num_frags = fragments.size();
      num_sent_frags.addAndGet(num_frags);

      if (log.isTraceEnabled()) {
        Address dest = msg.getDest();
        StringBuilder sb = new StringBuilder("fragmenting packet to ");
        sb.append((dest != null ? dest.toString() : "<all members>"))
            .append(" (size=")
            .append(buffer.length);
        sb.append(") into ")
            .append(num_frags)
            .append(" fragment(s) [frag_size=")
            .append(frag_size)
            .append(']');
        log.trace(sb.toString());
      }

      long frag_id = getNextId(); // used as a seqno
      for (int i = 0; i < fragments.size(); i++) {
        Range r = fragments.get(i);
        // don't copy the buffer, only src, dest and headers. Only copy the headers one time!
        Message frag_msg = msg.copy(false, i == 0);
        frag_msg.setBuffer(buffer, (int) r.low, (int) r.high);
        FragHeader hdr = new FragHeader(frag_id, i, num_frags);
        frag_msg.putHeader(this.id, hdr);
        down_prot.down(new Event(Event.MSG, frag_msg));
      }
    } catch (Exception e) {
      if (log.isErrorEnabled()) log.error("fragmentation failure", e);
    }
  }
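The splitting itself is delegated to Util.computeFragOffsets. A simplified sketch of what such a computation does, assuming (as the setBuffer call above suggests) that each Range carries the fragment's offset in low and its length in high; JGroups' actual implementation may differ:

  import java.util.ArrayList;
  import java.util.List;

  static List<long[]> computeFragOffsets(int offset, int length, int fragSize) {
    List<long[]> ranges = new ArrayList<>(); // each entry: {low = offset, high = length}
    for (int pos = offset, remaining = length; remaining > 0; ) {
      int size = Math.min(fragSize, remaining);
      ranges.add(new long[] {pos, size});
      pos += size;
      remaining -= size;
    }
    return ranges;
  }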
Example #22
 // Appends a new packet of buffered deletes to the stream,
 // setting its generation:
 public synchronized long push(FrozenBufferedUpdates packet) {
   /*
    * The insert operation must be atomic. If we let threads increment the gen
    * and push the packet afterwards we risk that packets are out of order.
    * With DWPT this is possible if two or more flushes are racing for pushing
     * updates. If the pushed packets get out of order we would lose documents,
     * since deletes would be applied to the wrong segments.
    */
   packet.setDelGen(nextGen++);
   assert packet.any();
   assert checkDeleteStats();
   assert packet.delGen() < nextGen;
   assert updates.isEmpty() || updates.get(updates.size() - 1).delGen() < packet.delGen()
       : "Delete packets must be in order";
   updates.add(packet);
   numTerms.addAndGet(packet.numTermDeletes);
   bytesUsed.addAndGet(packet.bytesUsed);
   if (infoStream.isEnabled("BD")) {
     infoStream.message(
         "BD",
         "push deletes "
             + packet
             + " delGen="
             + packet.delGen()
             + " packetCount="
             + updates.size()
             + " totBytesUsed="
             + bytesUsed.get());
   }
   assert checkDeleteStats();
   return packet.delGen();
 }
Example #23
  @Override
  public void output(Collection<Metric> metrics) {
    if (!eventRegistered.getAndSet(true)) {
      EventBusManager.createRegistrationPoint()
          .subscribe(
              WriteToStorageEvent.class,
              w -> {
                MetricStorage storage = w.storageToWriteTo().getSubStorageCalled("cassandra");
                storage.store("metrics-to-cassandra", metricCount.longValue());
              });

      EvilManagerHack.subscribe(this.cluster);
    }

    if (metrics.size() == 0) {
      return;
    }

    Map<RetentionTable, BatchStatement> stms =
        LazyMap.<RetentionTable, BatchStatement>lazyMap(
            new HashMap<>(), () -> new BatchStatement());
    for (Metric metric : metrics) {
      insertMetricIntoBatch(metric, stms);
    }
    KeyspaceMetadata metadata = cluster.getMetadata().getKeyspace(keyspace);
    for (RetentionTable table : stms.keySet()) {
      createTableIfNecessary(table, metadata);
    }
    for (BatchStatement batch : stms.values()) {
      session.execute(batch);
    }

    metricCount.addAndGet(metrics.size());
  }
Example #24
    public void receive(Message msg) {
      byte[] buf = msg.getRawBuffer();
      byte type = buf[msg.getOffset()];

      switch (type) {
        case START:
          ByteBuffer tmp = ByteBuffer.wrap(buf, 1 + msg.getOffset(), Global.LONG_SIZE);
          num_msgs = (int) tmp.getLong();
          print = num_msgs / 10;
          current_value.set(0);
          total_bytes.set(0);
          start = System.currentTimeMillis();
          break;
        case DATA:
          long new_val = current_value.incrementAndGet();
          total_bytes.addAndGet(msg.getLength() - Global.INT_SIZE);
          if (print > 0 && new_val % print == 0) System.out.println("received " + new_val);
          if (new_val >= num_msgs) {
            long time = System.currentTimeMillis() - start;
            double msgs_sec = (current_value.get() / (time / 1000.0));
            double throughput = total_bytes.get() / (time / 1000.0);
            System.out.println(
                String.format(
                    "\nreceived %d messages in %d ms (%.2f msgs/sec), throughput=%s",
                    current_value.get(), time, msgs_sec, Util.printBytes(throughput)));
            break;
          }
          break;
        default:
          System.err.println("Type " + type + " is invalid");
      }
    }
Example #25
  @Test
  public void testConsumers() {
    final AtomicLong lc = new AtomicLong(0L);
    Assert.assertEquals(Anoa.of(1L), handler.consumer(lc::addAndGet).apply(handler.of(1L)));
    Assert.assertEquals(1L, lc.get());
    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .consumerChecked(
                __ -> {
                  throw new IOException();
                })
            .apply(handler.of(1L)));

    final AtomicLong lc2 = new AtomicLong(0L);
    Assert.assertEquals(
        Anoa.of(1L),
        handler.biConsumer((Long x, Long y) -> lc2.addAndGet(x + y)).apply(Anoa.of(1L), 1L));
    Assert.assertEquals(2L, lc2.get());

    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .biConsumerChecked(
                (_1, _2) -> {
                  throw new IOException();
                })
            .apply(Anoa.of(1L), 1L));
  }
Example #26
  public void loadSegment(DataSegment segment, LoadPeonCallback callback) {
    synchronized (lock) {
      if ((currentlyLoading != null)
          && currentlyLoading.getSegmentIdentifier().equals(segment.getIdentifier())) {
        if (callback != null) {
          currentlyLoading.addCallback(callback);
        }
        return;
      }
    }

    SegmentHolder holder = new SegmentHolder(segment, LOAD, Arrays.asList(callback));

    synchronized (lock) {
      if (segmentsToLoad.contains(holder)) {
        if (callback != null) {
          currentlyLoading.addCallback(callback);
        }
        return;
      }
    }

    log.info("Asking server peon[%s] to load segment[%s]", basePath, segment);
    queuedSize.addAndGet(segment.getSize());
    segmentsToLoad.add(holder);
    doNext();
  }
Example #27
  public int logManyTablets(Map<CommitSession, List<Mutation>> mutations) throws IOException {

    final Map<CommitSession, List<Mutation>> loggables =
        new HashMap<CommitSession, List<Mutation>>(mutations);
    for (CommitSession t : mutations.keySet()) {
      if (!enabled(t)) loggables.remove(t);
    }
    if (loggables.size() == 0) return -1;

    int seq =
        write(
            loggables.keySet(),
            false,
            new Writer() {
              @Override
              public LoggerOperation write(DfsLogger logger, int ignored) throws Exception {
                List<TabletMutations> copy = new ArrayList<TabletMutations>(loggables.size());
                for (Entry<CommitSession, List<Mutation>> entry : loggables.entrySet()) {
                  CommitSession cs = entry.getKey();
                  copy.add(new TabletMutations(cs.getLogId(), cs.getWALogSeq(), entry.getValue()));
                }
                return logger.logManyTablets(copy);
              }
            });
    for (List<Mutation> entry : loggables.values()) {
      if (entry.size() < 1)
        throw new IllegalArgumentException("logManyTablets: logging empty mutation list");
      for (Mutation m : entry) {
        logSizeEstimate.addAndGet(m.numBytes());
      }
    }
    return seq;
  }
Example #28
 protected boolean enqueue(ServerMessage message) {
   boolean result = mq.add(message);
    if (result) {
     queueIn.addAndGet(1);
   }
   return result;
 }
Example #29
  private void doEviction(BlockCacheKey key, CacheablePair evictedBlock) {
    long evictedHeap = 0;
    synchronized (evictedBlock) {
      if (evictedBlock.serializedData == null) {
        // someone else already freed
        return;
      }
      evictedHeap = evictedBlock.heapSize();
      ByteBuffer bb = evictedBlock.serializedData;
      evictedBlock.serializedData = null;
      backingStore.free(bb);

      // We have to do this callback inside the synchronization here.
      // Otherwise we can have the following interleaving:
      // Thread A calls getBlock():
      // SlabCache directs call to this SingleSizeCache
      // It gets the CacheablePair object
      // Thread B runs eviction
      // doEviction() is called and sets serializedData = null, here.
      // Thread A sees the null serializedData, and returns null
      // Thread A calls cacheBlock on the same block, and gets
      // "already cached" since the block is still in backingStore

      if (actionWatcher != null) {
        actionWatcher.onEviction(key, this);
      }
    }
    stats.evicted();
    size.addAndGet(-1 * evictedHeap);
  }
Example #30
  private synchronized void syncLog(ILogRecord logRecord) throws ACIDException {
    ITransactionContext txnCtx = null;

    if (logRecord.getLogType() != LogType.FLUSH) {
      txnCtx = logRecord.getTxnCtx();
      if (txnCtx.getTxnState() == ITransactionManager.ABORTED
          && logRecord.getLogType() != LogType.ABORT) {
        throw new ACIDException(
            "Aborted job(" + txnCtx.getJobId() + ") tried to write non-abort type log record.");
      }
    }
    if (getLogFileOffset(appendLSN.get()) + logRecord.getLogSize() > logFileSize) {
      prepareNextLogFile();
      appendPage.isFull(true);
      getAndInitNewPage();
    } else if (!appendPage.hasSpace(logRecord.getLogSize())) {
      appendPage.isFull(true);
      getAndInitNewPage();
    }
    if (logRecord.getLogType() == LogType.UPDATE) {
      logRecord.setPrevLSN(txnCtx.getLastLSN());
    }
    appendPage.append(logRecord, appendLSN.get());

    if (logRecord.getLogType() == LogType.FLUSH) {
      logRecord.setLSN(appendLSN.get());
    }
    appendLSN.addAndGet(logRecord.getLogSize());
  }