static Address mapLimit(final int units, final int heads) {
    // Hardcoded for a 32-bit word model; the general form is
    // ArchitecturalWord.getModel().bytesInWord().
    final int WORD_SIZE = 4;
    // Two words for each of the (units + heads + 1) entries, rounded up to a
    // whole number of pages.
    return baseAddress.plus(
        Math.floorDiv(((units + heads + 1) * WORD_SIZE * 2) + (PAGE_SIZE - 1), PAGE_SIZE)
            * PAGE_SIZE);
 }
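A quick check of the rounding arithmetic: with PAGE_SIZE = 4096, a raw size of 1 byte rounds up to 4096 and 4097 bytes round up to 8192. A minimal standalone sketch of the same round-up-to-a-page idiom (the helper name is illustrative, not part of the original code):

  // Illustrative only: round size up to a whole number of pageSize-byte pages.
  static int roundUpToPages(int size, int pageSize) {
    return Math.floorDiv(size + pageSize - 1, pageSize) * pageSize;
  }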
 /**
   * Low level copy of <code>len</code> elements from <code>src[srcIdx]</code> to <code>
   * dst[dstIdx]</code>.
   *
   * <p>Assumption: <code>src != dst || (srcIdx >= dstIdx)</code> and element size is 8 bytes.
   *
   * @param src the source array
   * @param srcIdx index in the source array to begin the copy
   * @param dst the destination array
   * @param dstIdx index in the destination array to begin the copy
  * @param len number of array elements to copy
  */
 @Inline
 public static void arraycopy64Bit(Object src, int srcIdx, Object dst, int dstIdx, int len) {
   Address srcPtr = VM_Magic.objectAsAddress(src).plus(srcIdx << LOG_BYTES_IN_DOUBLE);
   Address dstPtr = VM_Magic.objectAsAddress(dst).plus(dstIdx << LOG_BYTES_IN_DOUBLE);
   int copyBytes = len << LOG_BYTES_IN_DOUBLE;
   if (USE_NATIVE && len > (NATIVE_THRESHOLD >> LOG_BYTES_IN_DOUBLE)) {
     memcopy(dstPtr, srcPtr, copyBytes);
   } else {
     // The elements of long[] and double[] are always doubleword aligned
     // therefore we can do 64 bit load/stores without worrying about alignment.
     Address endPtr = srcPtr.plus(copyBytes);
     while (srcPtr.LT(endPtr)) {
       // We generate abysmal code on IA32 if we try to use the FP registers,
       // so use the gprs instead even though it results in more instructions.
       if (VM.BuildForIA32) {
         dstPtr.store(srcPtr.loadInt());
         dstPtr.store(srcPtr.loadInt(Offset.fromIntSignExtend(4)), Offset.fromIntSignExtend(4));
       } else {
         dstPtr.store(srcPtr.loadDouble());
       }
       srcPtr = srcPtr.plus(8);
       dstPtr = dstPtr.plus(8);
     }
   }
 }
  private boolean acquireRecyclableLines(int bytes, int align, int offset) {
    while (line < LINES_IN_BLOCK || acquireRecyclableBlock()) {
      line = space.getNextAvailableLine(markTable, line);
      if (line < LINES_IN_BLOCK) {
        int endLine = space.getNextUnavailableLine(markTable, line);
        cursor = recyclableBlock.plus(Extent.fromIntSignExtend(line << LOG_BYTES_IN_LINE));
        limit = recyclableBlock.plus(Extent.fromIntSignExtend(endLine << LOG_BYTES_IN_LINE));
        if (SANITY_CHECK_LINE_MARKS) {
          Address tmp = cursor;
          while (tmp.LT(limit)) {
            if (tmp.loadByte() != (byte) 0) {
              Log.write("cursor: ");
              Log.writeln(cursor);
              Log.write(" limit: ");
              Log.writeln(limit);
              Log.write("current: ");
              Log.write(tmp);
              Log.write("  value: ");
              Log.write(tmp.loadByte());
              Log.write("   line: ");
              Log.write(line);
              Log.write("endline: ");
              Log.write(endLine);
              Log.write("  chunk: ");
              Log.write(Chunk.align(cursor));
              Log.write("     hw: ");
              Log.write(Chunk.getHighWater(Chunk.align(cursor)));
              Log.writeln(" values: ");
              Address tmp2 = cursor;
              while (tmp2.LT(limit)) {
                Log.write(tmp2.loadByte());
                Log.write(" ");
              }
              Log.writeln();
            }
            VM.assertions._assert(tmp.loadByte() == (byte) 0);
            tmp = tmp.plus(1);
          }
        }
        if (VM.VERIFY_ASSERTIONS && bytes <= BYTES_IN_LINE) {
          Address start = alignAllocationNoFill(cursor, align, offset);
          Address end = start.plus(bytes);
          VM.assertions._assert(end.LE(limit));
        }
        VM.memory.zero(cursor, limit.diff(cursor).toWord().toExtent());
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.write("Z[");
          Log.write(cursor);
          Log.write("->");
          Log.write(limit);
          Log.writeln("]");
        }

        line = endLine;
        if (VM.VERIFY_ASSERTIONS && copy) VM.assertions._assert(!Block.isDefragSource(cursor));
        return true;
      }
    }
    return false;
  }
 /**
   * Copy <code>numBytes</code> bytes from <code>src</code> to <code>dst</code>. Assumption: either
   * the ranges are non-overlapping, or <code>src >= dst + BYTES_IN_ADDRESS</code>.
   *
   * @param dst the destination address
   * @param src the source address
   * @param numBytes the number of bytes to copy
  */
 private static void internalAlignedWordCopy(Address dst, Address src, int numBytes) {
   Address end = src.plus(numBytes);
   while (src.LT(end)) {
     dst.store(src.loadWord());
     src = src.plus(BYTES_IN_ADDRESS);
     dst = dst.plus(BYTES_IN_ADDRESS);
   }
 }
 /**
  * Copies a region of memory.
  *
  * @param dst Destination address
  * @param src Source address
  * @param cnt Number of bytes to copy
  */
 public static void memcopy(Address dst, Address src, Extent cnt) {
   Address srcEnd = src.plus(cnt);
   Address dstEnd = dst.plus(cnt);
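    // Half-open ranges [src, srcEnd) and [dst, dstEnd) intersect iff neither
    // ends at or before the start of the other.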
   boolean overlap = !srcEnd.LE(dst) && !dstEnd.LE(src);
   if (overlap) {
     SysCall.sysCall.sysMemmove(dst, src, cnt);
   } else {
     SysCall.sysCall.sysCopy(dst, src, cnt);
   }
 }
 @Inline
 private static void copy8Bytes(Address dstPtr, Address srcPtr) {
   if (BYTES_IN_COPY == 8) {
     if (VM.BuildForIA32) {
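        // As noted in arraycopy64Bit above, IA32 generates poor code when the
        // value goes through the FP registers, so move the bits as a long.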
       dstPtr.store(srcPtr.loadLong());
     } else {
       dstPtr.store(srcPtr.loadDouble());
     }
   } else {
     copy4Bytes(dstPtr, srcPtr);
     copy4Bytes(dstPtr.plus(4), srcPtr.plus(4));
   }
 }
 /**
   * Low level copy of <code>copyBytes</code> bytes from <code>srcPtr</code> to <code>
   * dstPtr</code>.
   *
   * <p>Assumption: the regions either do not overlap or <code>srcPtr >= dstPtr</code>, and <code>
   * copyBytes</code> is a multiple of 8.
  *
  * @param dstPtr The destination start address
  * @param srcPtr The source start address
  * @param copyBytes The number of bytes to be copied
  */
 public static void aligned64Copy(Address dstPtr, Address srcPtr, int copyBytes) {
   if (USE_NATIVE && copyBytes > NATIVE_THRESHOLD) {
     memcopy(dstPtr, srcPtr, copyBytes);
   } else {
     // The elements of long[] and double[] are always doubleword aligned
     // therefore we can do 64 bit load/stores without worrying about alignment.
     Address endPtr = srcPtr.plus(copyBytes);
     while (srcPtr.LT(endPtr)) {
       copy8Bytes(dstPtr, srcPtr);
       srcPtr = srcPtr.plus(8);
       dstPtr = dstPtr.plus(8);
     }
   }
 }
  /** Scans a reference array, tracing each element as an edge of the object graph. */
 public static void referenceArray(Object object, TransitiveClosure trace) {
   Address base = Magic.objectAsAddress(object);
   int length = ObjectModel.getArrayLength(object);
   for (int i = 0; i < length; i++) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(i << LOG_BYTES_IN_ADDRESS));
   }
 }
  /** Scans all reference fields of a scalar (non-array) object. */
 public static void scalar(Object object, TransitiveClosure trace) {
   Address base = Magic.objectAsAddress(object);
   int[] offsets = ObjectModel.getObjectType(object).asClass().getReferenceOffsets();
   for (int i = 0; i < offsets.length; i++) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(offsets[i]));
   }
 }
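  /**
   * Dumps memory around <code>start</code>, one word per line.
   *
   * @param start the address to dump around
   * @param beforeBytes the number of bytes before <code>start</code> to include
   * @param afterBytes the number of bytes after <code>start</code> to include
   */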
  public static void dumpMemory(Address start, int beforeBytes, int afterBytes) {

    beforeBytes = alignDown(beforeBytes, BYTES_IN_ADDRESS);
    afterBytes = alignUp(afterBytes, BYTES_IN_ADDRESS);
    VM.sysWrite("---- Dumping memory from ");
    VM.sysWrite(start.minus(beforeBytes));
    VM.sysWrite(" to ");
    VM.sysWrite(start.plus(afterBytes));
    VM.sysWrite(" ----\n");
    for (int i = -beforeBytes; i < afterBytes; i += BYTES_IN_ADDRESS) {
      VM.sysWrite(i, ": ");
      VM.sysWrite(start.plus(i));
      Word value = start.plus(i).loadWord();
      VM.sysWriteln("  ", value);
    }
  }
  @Inline
  private boolean acquireRecyclableBlockAddressOrder() {
    if (recyclableExhausted) {
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.writeln("[no recyclable available]");
      }
      return false;
    }
    int markState = 0;
    boolean usable = false;
    while (!usable) {
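      // Walk the reusable blocks in address order; when there is no current
      // block, or the next block would cross into a new recycle-alloc chunk,
      // ask the space for a fresh batch of reusable blocks.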
      Address next = recyclableBlock.plus(BYTES_IN_BLOCK);
      if (recyclableBlock.isZero() || ImmixSpace.isRecycleAllocChunkAligned(next)) {
        recyclableBlock = space.acquireReusableBlocks();
        if (recyclableBlock.isZero()) {
          recyclableExhausted = true;
          if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
            Log.writeln("[recyclable exhausted]");
          }
          line = LINES_IN_BLOCK;
          return false;
        }
      } else {
        recyclableBlock = next;
      }
      markState = Block.getBlockMarkState(recyclableBlock);
      usable = (markState > 0 && markState <= ImmixSpace.getReusuableMarkStateThreshold(copy));
      if (copy && Block.isDefragSource(recyclableBlock)) usable = false;
    }
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!Block.isUnused(recyclableBlock));
    Block.setBlockAsReused(recyclableBlock);

    lineUseCount += (LINES_IN_BLOCK - markState);
    return true; // found something good
  }
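  /**
   * Rounds <code>address</code> up to the next multiple of <code>alignment</code>, which must be a
   * power of two.
   *
   * @param address the address to align
   * @param alignment the alignment, a power of two
   * @return the aligned address
   */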
 @Inline
 public static Address alignUp(Address address, int alignment) {
   return address
       .plus(alignment - 1)
       .toWord()
       .and(Word.fromIntSignExtend(~(alignment - 1)))
       .toAddress();
 }
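The same mask trick works on plain integers, provided the alignment is a power of two. A minimal illustrative sketch (alignUpInt is a hypothetical helper, not part of this class):

  // Illustrative only: integer analogue of alignUp.
  static int alignUpInt(int value, int alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }
  // alignUpInt(13, 8) == 16, alignUpInt(16, 8) == 16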
 /**
   * Copy <code>copyBytes</code> bytes from <code>src</code> to <code>dst</code>. Assumption:
   * either the ranges are non-overlapping, or {@code src >= dst + 4}. Also, <code>src</code> and
   * <code>dst</code> are 4-byte aligned and <code>copyBytes</code> is a multiple of 4.
   *
   * @param dst the destination address
   * @param src the source address
   * @param copyBytes the number of bytes to copy
  */
 public static void aligned32Copy(Address dst, Address src, int copyBytes) {
   if (VM.VerifyAssertions) {
     VM._assert(copyBytes >= 0);
     VM._assert((copyBytes & (BYTES_IN_INT - 1)) == 0);
     VM._assert(src.toWord().and(Word.fromIntZeroExtend(BYTES_IN_INT - 1)).isZero());
     VM._assert(dst.toWord().and(Word.fromIntZeroExtend(BYTES_IN_INT - 1)).isZero());
     VM._assert(src.plus(copyBytes).LE(dst) || src.GE(dst.plus(BYTES_IN_INT)));
   }
   if (USE_NATIVE && copyBytes > NATIVE_THRESHOLD) {
     memcopy(dst, src, copyBytes);
   } else {
     Offset numBytes = Offset.fromIntSignExtend(copyBytes);
     if (BYTES_IN_COPY == 8 && copyBytes != 0) {
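        // Fast path: when src and dst are misaligned by the same amount we can
        // copy a leading 4-byte chunk (if needed), then whole 8-byte words,
        // then any 4-byte tail.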
       Word wordMask = Word.fromIntZeroExtend(BYTES_IN_COPY - 1);
       Word srcAlignment = src.toWord().and(wordMask);
       if (srcAlignment.EQ(dst.toWord().and(wordMask))) {
         Offset i = Offset.zero();
         if (srcAlignment.EQ(Word.fromIntZeroExtend(BYTES_IN_INT))) {
           copy4Bytes(dst.plus(i), src.plus(i));
           i = i.plus(BYTES_IN_INT);
         }
         Word endAlignment = srcAlignment.plus(numBytes).and(wordMask);
         numBytes = numBytes.minus(endAlignment.toOffset());
         for (; i.sLT(numBytes); i = i.plus(BYTES_IN_COPY)) {
           copy8Bytes(dst.plus(i), src.plus(i));
         }
         if (!endAlignment.isZero()) {
           copy4Bytes(dst.plus(i), src.plus(i));
         }
         return;
       }
     }
     // normal case: 32 bit or (64 bit not aligned)
     for (Offset i = Offset.zero(); i.sLT(numBytes); i = i.plus(BYTES_IN_INT)) {
       copy4Bytes(dst.plus(i), src.plus(i));
     }
   }
 }
 /**
   * Low level copy of <code>len</code> elements from <code>src[srcIdx]</code> to <code>
   * dst[dstIdx]</code>.
   *
   * <p>Assumption: <code>src != dst || (srcIdx >= dstIdx)</code> and element size is 4 bytes.
   *
   * @param src the source array
   * @param srcIdx index in the source array to begin the copy
   * @param dst the destination array
   * @param dstIdx index in the destination array to begin the copy
  * @param len number of array elements to copy
  */
 @Inline
 public static void arraycopy32Bit(Object src, int srcIdx, Object dst, int dstIdx, int len) {
   Address srcPtr = VM_Magic.objectAsAddress(src).plus(srcIdx << LOG_BYTES_IN_INT);
   Address dstPtr = VM_Magic.objectAsAddress(dst).plus(dstIdx << LOG_BYTES_IN_INT);
   int copyBytes = len << LOG_BYTES_IN_INT;
   if (USE_NATIVE && len > (NATIVE_THRESHOLD >> LOG_BYTES_IN_INT)) {
     memcopy(dstPtr, srcPtr, copyBytes);
   } else {
      // The elements of int[] and float[] are always 32 bit aligned,
      // therefore we can do 32 bit load/stores without worrying about alignment.
      // TODO: do measurements to determine whether, on PPC, it is a good idea
      //       to check for compatible doubleword alignment and handle that
      //       case via the FPRs in 64 bit chunks. Unclear if this will be a
      //       big enough win to justify checking, because big copies go into
      //       memcopy anyway and that will be faster than anything we do here.
     Address endPtr = srcPtr.plus(copyBytes);
     while (srcPtr.LT(endPtr)) {
       dstPtr.store(srcPtr.loadInt());
       srcPtr = srcPtr.plus(4);
       dstPtr = dstPtr.plus(4);
     }
   }
 }
  /**
    * External allocation slow path (called by the superclass when the slow path is actually
    * taken). This is necessary (rather than a direct call from the fast path) because of the
    * possibility of a thread switch and corresponding re-association of bump pointers to kernel
    * threads.
   *
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region or zero on failure
   */
  protected final Address allocSlowOnce(int bytes, int align, int offset) {
    Address ptr = space.getSpace(hot, copy, lineUseCount);

    if (ptr.isZero()) {
      lineUseCount = 0;
      return ptr; // failed allocation --- we will need to GC
    }

    /* we have been given a clean block */
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(Block.isAligned(ptr));
    lineUseCount = LINES_IN_BLOCK;

    zeroBlock(ptr);
    if (requestForLarge) {
      largeCursor = ptr;
      largeLimit = ptr.plus(BYTES_IN_BLOCK);
    } else {
      cursor = ptr;
      limit = ptr.plus(BYTES_IN_BLOCK);
    }

    return alloc(bytes, align, offset);
  }
  /**
    * Allocate space for a new object. This is frequently executed code and the coding is
    * deliberately sensitive to the optimizing compiler. After changing this, always check the
    * IR/MC that is generated.
   *
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region
   */
  @Inline
  public final Address alloc(int bytes, int align, int offset) {
    /* establish how much we need */
    Address start = alignAllocationNoFill(cursor, align, offset);
    Address end = start.plus(bytes);

    /* check whether we've exceeded the limit */
    if (end.GT(limit)) {
      if (bytes > BYTES_IN_LINE) return overflowAlloc(bytes, align, offset);
      else return allocSlowHot(bytes, align, offset);
    }

    /* sufficient memory is available, so we can finish performing the allocation */
    fillAlignmentGap(cursor, start);
    cursor = end;

    return start;
  }
  /**
    * Allocate space for a new object via the overflow (large) bump pointer. This is frequently
    * executed code and the coding is deliberately sensitive to the optimizing compiler. After
    * changing this, always check the IR/MC that is generated.
   *
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region
   */
  public final Address overflowAlloc(int bytes, int align, int offset) {
    /* establish how much we need */
    Address start = alignAllocationNoFill(largeCursor, align, offset);
    Address end = start.plus(bytes);

    /* check whether we've exceeded the limit */
    if (end.GT(largeLimit)) {
      requestForLarge = true;
      Address rtn = allocSlowInline(bytes, align, offset);
      requestForLarge = false;
      return rtn;
    }

    /* sufficient memory is available, so we can finish performing the allocation */
    fillAlignmentGap(largeCursor, start);
    largeCursor = end;

    return start;
  }
  /** All patterns bottom out here: each set bit in <code>pattern</code> selects a reference field to trace. */
 @Inline
 public static void pattern(int pattern, Object object, TransitiveClosure trace) {
   Address base = Magic.objectAsAddress(object).plus(FIELD_ZERO_OFFSET);
   if ((pattern & 1) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(0));
   }
   if ((pattern & 2) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(1 << LOG_BYTES_IN_ADDRESS));
   }
   if ((pattern & 4) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(2 << LOG_BYTES_IN_ADDRESS));
   }
   if ((pattern & 8) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(3 << LOG_BYTES_IN_ADDRESS));
   }
   if ((pattern & 16) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(4 << LOG_BYTES_IN_ADDRESS));
   }
   if ((pattern & 32) != 0) {
     trace.processEdge(ObjectReference.fromObject(object), base.plus(5 << LOG_BYTES_IN_ADDRESS));
   }
 }
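For example, a pattern value of 37 (binary 100101) traces the reference fields at word offsets 0, 2 and 5 from FIELD_ZERO_OFFSET. A minimal illustrative sketch of the same decoding (decodePattern is hypothetical, not part of this class):

  // Illustrative only: list the word offsets a given pattern would trace.
  static void decodePattern(int pattern) {
    for (int field = 0; pattern != 0; field++, pattern >>>= 1) {
      if ((pattern & 1) != 0) {
        System.out.println("field at word offset " + field);
      }
    }
  }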
  /**
    * Low level copy of <code>len</code> elements from <code>src[srcPos]</code> to <code>
    * dst[dstPos]</code>.
    *
    * <p>Assumptions: <code>src != dst || (srcPos >= dstPos + 4)</code>, and <code>src</code> and
    * <code>dst</code> are arrays of 8-bit elements.
    *
    * @param src the source array
    * @param srcPos index in the source array to begin the copy
    * @param dst the destination array
    * @param dstPos index in the destination array to begin the copy
   * @param len number of array elements to copy
   */
  @Inline
  public static void arraycopy8Bit(Object src, int srcPos, Object dst, int dstPos, int len) {
    if (USE_NATIVE && len > NATIVE_THRESHOLD) {
      memcopy(
          VM_Magic.objectAsAddress(dst).plus(dstPos),
          VM_Magic.objectAsAddress(src).plus(srcPos),
          len);
    } else {
      if (len >= BYTES_IN_ADDRESS
          && (srcPos & (BYTES_IN_ADDRESS - 1)) == (dstPos & (BYTES_IN_ADDRESS - 1))) {
        // relative alignment is the same
        int byteStart = srcPos;
        int wordStart = alignUp(srcPos, BYTES_IN_ADDRESS);
        int wordEnd = alignDown(srcPos + len, BYTES_IN_ADDRESS);
        int byteEnd = srcPos + len;
        int startDiff = wordStart - byteStart;
        int endDiff = byteEnd - wordEnd;
        int wordLen = wordEnd - wordStart;
        Address srcPtr = VM_Magic.objectAsAddress(src).plus(srcPos + startDiff);
        Address dstPtr = VM_Magic.objectAsAddress(dst).plus(dstPos + startDiff);

        if (VM.BuildFor64Addr) {
          switch (startDiff) {
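            // Cases fall through deliberately: every leading byte from
            // startDiff down to 1 is copied before the word loop.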
            case 7:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-7)), Offset.fromIntSignExtend(-7));
            case 6:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-6)), Offset.fromIntSignExtend(-6));
            case 5:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-5)), Offset.fromIntSignExtend(-5));
            case 4:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-4)), Offset.fromIntSignExtend(-4));
            case 3:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-3)), Offset.fromIntSignExtend(-3));
            case 2:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-2)), Offset.fromIntSignExtend(-2));
            case 1:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-1)), Offset.fromIntSignExtend(-1));
          }
        } else {
          switch (startDiff) {
            case 3:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-3)), Offset.fromIntSignExtend(-3));
            case 2:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-2)), Offset.fromIntSignExtend(-2));
            case 1:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(-1)), Offset.fromIntSignExtend(-1));
          }
        }

        Address endPtr = srcPtr.plus(wordLen);
        while (srcPtr.LT(endPtr)) {
          dstPtr.store(srcPtr.loadWord());
          srcPtr = srcPtr.plus(BYTES_IN_ADDRESS);
          dstPtr = dstPtr.plus(BYTES_IN_ADDRESS);
        }

        if (VM.BuildFor64Addr) {
          switch (endDiff) {
            case 7:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(6)), Offset.fromIntSignExtend(6));
            case 6:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(5)), Offset.fromIntSignExtend(5));
            case 5:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(4)), Offset.fromIntSignExtend(4));
            case 4:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(3)), Offset.fromIntSignExtend(3));
            case 3:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(2)), Offset.fromIntSignExtend(2));
            case 2:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(1)), Offset.fromIntSignExtend(1));
            case 1:
              dstPtr.store(srcPtr.loadByte());
          }
        } else {
          switch (endDiff) {
            case 3:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(2)), Offset.fromIntSignExtend(2));
            case 2:
              dstPtr.store(
                  srcPtr.loadByte(Offset.fromIntSignExtend(1)), Offset.fromIntSignExtend(1));
            case 1:
              dstPtr.store(srcPtr.loadByte());
          }
        }

      } else {
        Address srcPtr = VM_Magic.objectAsAddress(src).plus(srcPos);
        Address dstPtr = VM_Magic.objectAsAddress(dst).plus(dstPos);
        Address endPtr = srcPtr.plus(len);
        while (srcPtr.LT(endPtr)) {
          dstPtr.store(srcPtr.loadByte());
          srcPtr = srcPtr.plus(1);
          dstPtr = dstPtr.plus(1);
        }
      }
    }
  }
  /**
    * Low level copy of <code>len</code> elements from <code>src[srcPos]</code> to <code>
    * dst[dstPos]</code>.
    *
    * <p>Assumption: <code>src != dst || (srcPos >= dstPos + 2)</code>.
    *
    * @param src the source array
    * @param srcPos index in the source array to begin the copy
    * @param dst the destination array
    * @param dstPos index in the destination array to begin the copy
   * @param len number of array elements to copy
   */
  @Inline
  public static void arraycopy16Bit(Object src, int srcPos, Object dst, int dstPos, int len) {
    if (USE_NATIVE && len > (NATIVE_THRESHOLD >> LOG_BYTES_IN_SHORT)) {
      memcopy(
          VM_Magic.objectAsAddress(dst).plus(dstPos << LOG_BYTES_IN_SHORT),
          VM_Magic.objectAsAddress(src).plus(srcPos << LOG_BYTES_IN_SHORT),
          len << LOG_BYTES_IN_SHORT);
    } else {
      if (len >= (BYTES_IN_ADDRESS >>> LOG_BYTES_IN_SHORT)
          && (srcPos & ((BYTES_IN_ADDRESS - 1) >>> LOG_BYTES_IN_SHORT))
              == (dstPos & ((BYTES_IN_ADDRESS - 1) >>> LOG_BYTES_IN_SHORT))) {
        // relative alignment is the same
        int byteStart = srcPos << LOG_BYTES_IN_SHORT;
        int wordStart = alignUp(byteStart, BYTES_IN_ADDRESS);
        int wordEnd = alignDown(byteStart + (len << LOG_BYTES_IN_SHORT), BYTES_IN_ADDRESS);
        int byteEnd = byteStart + (len << LOG_BYTES_IN_SHORT);
        int startDiff = wordStart - byteStart;
        int endDiff = byteEnd - wordEnd;
        int wordLen = wordEnd - wordStart;
        Address srcPtr =
            VM_Magic.objectAsAddress(src).plus((srcPos << LOG_BYTES_IN_SHORT) + startDiff);
        Address dstPtr =
            VM_Magic.objectAsAddress(dst).plus((dstPos << LOG_BYTES_IN_SHORT) + startDiff);

        if (VM.BuildFor64Addr) {
          switch (startDiff) {
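            // Cases fall through deliberately: every leading 2-byte chunk from
            // startDiff down to 2 is copied before the word loop.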
            case 6:
              dstPtr.store(
                  srcPtr.loadChar(Offset.fromIntSignExtend(-6)), Offset.fromIntSignExtend(-6));
            case 4:
              dstPtr.store(
                  srcPtr.loadChar(Offset.fromIntSignExtend(-4)), Offset.fromIntSignExtend(-4));
            case 2:
              dstPtr.store(
                  srcPtr.loadChar(Offset.fromIntSignExtend(-2)), Offset.fromIntSignExtend(-2));
          }
        } else {
          if (startDiff == 2) {
            dstPtr.store(
                srcPtr.loadChar(Offset.fromIntSignExtend(-2)), Offset.fromIntSignExtend(-2));
          }
        }

        Address endPtr = srcPtr.plus(wordLen);
        while (srcPtr.LT(endPtr)) {
          dstPtr.store(srcPtr.loadWord());
          srcPtr = srcPtr.plus(BYTES_IN_ADDRESS);
          dstPtr = dstPtr.plus(BYTES_IN_ADDRESS);
        }

        if (VM.BuildFor64Addr) {
          switch (endDiff) {
            case 6:
              dstPtr.store(
                  srcPtr.loadChar(Offset.fromIntSignExtend(4)), Offset.fromIntSignExtend(4));
            case 4:
              dstPtr.store(
                  srcPtr.loadChar(Offset.fromIntSignExtend(2)), Offset.fromIntSignExtend(2));
            case 2:
              dstPtr.store(srcPtr.loadChar());
          }
        } else {
          if (endDiff == 2) {
            dstPtr.store(srcPtr.loadChar());
          }
        }

      } else {
        Address srcPtr = VM_Magic.objectAsAddress(src).plus(srcPos << LOG_BYTES_IN_CHAR);
        Address dstPtr = VM_Magic.objectAsAddress(dst).plus(dstPos << LOG_BYTES_IN_CHAR);
        Address endPtr = srcPtr.plus(len << LOG_BYTES_IN_CHAR);
        while (srcPtr.LT(endPtr)) {
          dstPtr.store(srcPtr.loadChar());
          srcPtr = srcPtr.plus(2);
          dstPtr = dstPtr.plus(2);
        }
      }
    }
  }
  /**
    * Low level copy of <code>copyBytes</code> bytes from <code>srcPtr</code> to <code>
    * dstPtr</code>.
    *
    * <p>Assumption: the regions either do not overlap or <code>srcPtr >= dstPtr</code>, and <code>
    * copyBytes</code> is a multiple of 2.
   *
   * @param dstPtr The destination start address
   * @param srcPtr The source start address
   * @param copyBytes The number of bytes to be copied
   */
  public static void aligned16Copy(Address dstPtr, Address srcPtr, int copyBytes) {
    if (USE_NATIVE && copyBytes > NATIVE_THRESHOLD) {
      memcopy(dstPtr, srcPtr, copyBytes);
    } else {
      if (copyBytes >= BYTES_IN_COPY
          && srcPtr.toWord().and(Word.fromIntZeroExtend(BYTES_IN_COPY - 1))
              .EQ(dstPtr.toWord().and(Word.fromIntZeroExtend(BYTES_IN_COPY - 1)))) {
        // relative alignment is the same
        Address endPtr = srcPtr.plus(copyBytes);
        Address wordEndPtr =
            endPtr.toWord().and(Word.fromIntZeroExtend(BYTES_IN_COPY - 1).not()).toAddress();

        if (BYTES_IN_COPY == 8) {
          if (srcPtr.toWord().and(Word.fromIntZeroExtend(2)).NE(Word.zero())) {
            copy2Bytes(dstPtr, srcPtr);
            srcPtr = srcPtr.plus(2);
            dstPtr = dstPtr.plus(2);
          }
          if (srcPtr.toWord().and(Word.fromIntZeroExtend(4)).NE(Word.zero())) {
            copy4Bytes(dstPtr, srcPtr);
            srcPtr = srcPtr.plus(4);
            dstPtr = dstPtr.plus(4);
          }
        } else {
          if (srcPtr.toWord().and(Word.fromIntZeroExtend(2)).NE(Word.zero())) {
            copy2Bytes(dstPtr, srcPtr);
            srcPtr = srcPtr.plus(2);
            dstPtr = dstPtr.plus(2);
          }
        }
        while (srcPtr.LT(wordEndPtr)) {
          if (BYTES_IN_COPY == 8) {
            copy8Bytes(dstPtr, srcPtr);
          } else {
            copy4Bytes(dstPtr, srcPtr);
          }
          srcPtr = srcPtr.plus(BYTES_IN_COPY);
          dstPtr = dstPtr.plus(BYTES_IN_COPY);
        }
        // if(VM.VerifyAssertions) VM._assert(wordEndPtr.EQ(srcPtr));
        if (BYTES_IN_COPY == 8) {
          if (endPtr.toWord().and(Word.fromIntZeroExtend(4)).NE(Word.zero())) {
            copy4Bytes(dstPtr, srcPtr);
            srcPtr = srcPtr.plus(4);
            dstPtr = dstPtr.plus(4);
          }
          if (endPtr.toWord().and(Word.fromIntZeroExtend(2)).NE(Word.zero())) {
            copy2Bytes(dstPtr, srcPtr);
          }
        } else {
          if (endPtr.toWord().and(Word.fromIntZeroExtend(2)).NE(Word.zero())) {
            copy2Bytes(dstPtr, srcPtr);
          }
        }
      } else {
        Address endPtr = srcPtr.plus(copyBytes);
        while (srcPtr.LT(endPtr)) {
          copy2Bytes(dstPtr, srcPtr);
          srcPtr = srcPtr.plus(2);
          dstPtr = dstPtr.plus(2);
        }
      }
    }
  }