Example #1
  // there might be a straightforward one-line way to do the below
  // that's portable and totally safe against roundoff, but I haven't
  // thought of it.  Therefore, we err on the side of caution
  private int maptype1_quantvals() {
    int vals = (int) (Math.floor(Math.pow(entries, 1. / dim)));

    // the above *should* be reliable, but we'll not assume that FP is
    // ever reliable when bitstream sync is at stake; verify via integer
    // means that vals really is the greatest value for which
    // vals^dim <= entries
    // treat the above as an initial guess
    while (true) {
      int acc = 1;
      int acc1 = 1;
      for (int i = 0; i < dim; i++) {
        acc *= vals;
        acc1 *= vals + 1;
      }
      if (acc <= entries && acc1 > entries) {
        return (vals);
      } else {
        if (acc > entries) {
          vals--;
        } else {
          vals++;
        }
      }
    }
  }
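A minimal standalone sketch of the same idea (hypothetical helper, not part of JOrbis), using long products so the integer verification itself cannot overflow for realistic codebook sizes:

  // sketch: greatest vals with vals^dim <= entries; FP guess, integer verify
  static int largestIntegerRoot(int entries, int dim) {
    int vals = (int) Math.floor(Math.pow(entries, 1. / dim)); // initial guess
    while (true) {
      long acc = 1, acc1 = 1;
      for (int i = 0; i < dim; i++) {
        acc *= vals;      // vals^dim
        acc1 *= vals + 1; // (vals+1)^dim
      }
      if (acc <= entries && acc1 > entries) return vals;
      vals += (acc > entries) ? -1 : 1; // nudge the guess and re-check
    }
  }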
Example #2
  // unpack the quantized list of values for encode/decode
  // we need to deal with two map types: in map type 1, the values are
  // generated algorithmically (each column of the vector counts through
  // the values in the quant vector). in map type 2, all the values came
  // in as an explicit list.  Both value lists must be unpacked
  float[] unquantize() {

    if (maptype == 1 || maptype == 2) {
      int quantvals;
      float mindel = float32_unpack(q_min);
      float delta = float32_unpack(q_delta);
      float[] r = new float[entries * dim];

      // System.err.println("q_min="+q_min+", mindel="+mindel);

      // maptype 1 and 2 both use a quantized value vector, but
      // different sizes
      switch (maptype) {
        case 1:
          // most of the time, entries%dimensions == 0, but we need to be
          // well defined.  We define that the possible values at each
          // scalar is values == entries/dim.  If entries%dim != 0, we'll
          // have 'too few' values (values*dim<entries), which means that
          // we'll have 'left over' entries; left over entries use zeroed
          // values (and are wasted).  So don't generate codebooks like that
          quantvals = maptype1_quantvals();
          for (int j = 0; j < entries; j++) {
            float last = 0.f;
            int indexdiv = 1;
            for (int k = 0; k < dim; k++) {
              int index = (j / indexdiv) % quantvals;
              float val = quantlist[index];
              val = Math.abs(val) * delta + mindel + last;
              if (q_sequencep != 0) last = val;
              r[j * dim + k] = val;
              indexdiv *= quantvals;
            }
          }
          break;
        case 2:
          for (int j = 0; j < entries; j++) {
            float last = 0.f;
            for (int k = 0; k < dim; k++) {
              float val = quantlist[j * dim + k];
              // if((j*dim+k)==0){System.err.println(" | 0 -> "+val+" | ");}
              val = Math.abs(val) * delta + mindel + last;
              if (q_sequencep != 0) last = val;
              r[j * dim + k] = val;
              // if((j*dim+k)==0){System.err.println(" $ r[0] -> "+r[0]+" | ");}
            }
          }
          // System.err.println("\nr[0]="+r[0]);
      }
      return (r);
    }
    return (null);
  }
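To make the maptype 1 indexing concrete: the entry number j is read as a dim-digit number in base quantvals, so each scalar of the vector simply counts through the single quant column. With quantvals = 3 and dim = 2, entry j = 5 picks quant-list indices 2 and 1, since (5/1)%3 = 2 and (5/3)%3 = 1. A tiny sketch (hypothetical helper, illustrative values only):

  // sketch: print the quant-list index used by each scalar of each entry
  static void printLatticeIndices(int entries, int dim, int quantvals) {
    for (int j = 0; j < entries; j++) {
      StringBuilder sb = new StringBuilder("entry " + j + " ->");
      int indexdiv = 1;
      for (int k = 0; k < dim; k++) {
        sb.append(' ').append((j / indexdiv) % quantvals); // digit k, base quantvals
        indexdiv *= quantvals;
      }
      System.out.println(sb);
    }
  }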
Example #3
 // doesn't currently guard under/overflow
 static long float32_pack(float val) {
   int sign = 0;
   int exp;
   int mant;
   if (val < 0) {
     sign = 0x80000000;
     val = -val;
   }
   exp = (int) Math.floor(Math.log(val) / Math.log(2));
   // scale val by 2^((VQ_FMAN-1)-exp) so the mantissa fills VQ_FMAN bits
   mant = (int) Math.rint(ldexp(val, (VQ_FMAN - 1) - exp));
   exp = (exp + VQ_FEXP_BIAS) << VQ_FMAN;
   return (sign | exp | mant);
 }
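For reference, a sketch of the opposite direction (the class has its own float32_unpack; this is only an illustration, assuming the libvorbis constants VQ_FMAN = 21 and VQ_FEXP_BIAS = 768): the packed value is 1 sign bit, a 10-bit biased exponent and a 21-bit mantissa, so unpacking is mantissa * 2^(exponent - VQ_FEXP_BIAS - (VQ_FMAN - 1)) with the sign reapplied, using the ldexp helper shown in the last example.

 // sketch only; constants and layout as used by libvorbis (assumed)
 static float float32_unpack_sketch(long val) {
   float mant = val & 0x1fffff;                       // low VQ_FMAN bits: mantissa
   int exp = (int) ((val & 0x7fe00000L) >>> VQ_FMAN); // next 10 bits: biased exponent
   if ((val & 0x80000000L) != 0) mant = -mant;        // top bit: sign
   return ldexp(mant, exp - (VQ_FMAN - 1) - VQ_FEXP_BIAS);
 }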
Example #4
  Object look(DspState vd, InfoMode vm, Object vr) {
    InfoResidue0 info = (InfoResidue0) vr;
    LookResidue0 look = new LookResidue0();
    int acc = 0;
    int dim;
    int maxstage = 0;
    look.info = info;
    look.map = vm.mapping;

    look.parts = info.partitions;
    look.fullbooks = vd.fullbooks;
    look.phrasebook = vd.fullbooks[info.groupbook];

    dim = look.phrasebook.dim;

    look.partbooks = new int[look.parts][];

    // secondstages[j] is a bitmask of the cascade stages in use for partition
    // class j; gather the codebook number for each stage that is present
    for (int j = 0; j < look.parts; j++) {
      int i = info.secondstages[j];
      int stages = Util.ilog(i);
      if (stages != 0) {
        if (stages > maxstage) maxstage = stages;
        look.partbooks[j] = new int[stages];
        for (int k = 0; k < stages; k++) {
          if ((i & (1 << k)) != 0) {
            look.partbooks[j][k] = info.booklist[acc++];
          }
        }
      }
    }

    // partvals = parts^dim: one phrasebook codeword classifies dim partitions at
    // once; decodemap[j] holds codeword value j's digits in base parts, most
    // significant digit first
    look.partvals = (int) Math.rint(Math.pow(look.parts, dim));
    look.stages = maxstage;
    look.decodemap = new int[look.partvals][];
    for (int j = 0; j < look.partvals; j++) {
      int val = j;
      int mult = look.partvals / look.parts;
      look.decodemap[j] = new int[dim];

      for (int k = 0; k < dim; k++) {
        int deco = val / mult;
        val -= deco * mult;
        mult /= look.parts;
        look.decodemap[j][k] = deco;
      }
    }
    return (look);
  }
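A small sketch of that digit expansion on its own (hypothetical helper, not part of the library): with parts = 4 and dim = 2, codeword value 9 expands to {2, 1}, since 9 = 2*4 + 1.

  // sketch: expand a phrasebook codeword value into its dim partition classes
  static int[] partitionDigits(int j, int parts, int dim) {
    int[] digits = new int[dim];
    int mult = (int) Math.rint(Math.pow(parts, dim)) / parts; // parts^(dim-1)
    for (int k = 0; k < dim; k++) {
      digits[k] = j / mult; // most significant digit first
      j -= digits[k] * mult;
      mult /= parts;
    }
    return digits;
  }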
Example #5

 private boolean openSeekable() {
   final Info initialInfo = new Info();
   final Comment initialComment = new Comment();
   this.m_chunkSize = Math.min(8500, (int) length(this.m_vorbisStream));
   final Page page = new Page();
   final int[] testSerialno = {0};
   final int ret = this.fetchHeaders(initialInfo, initialComment, testSerialno, null);
   final int serialno = testSerialno[0];
   final int dataOffset = (int) this.m_offset;
   this.m_oggStreamState.clear();
   if (ret < 0) {
     return false;
   }
   seek(this.m_vorbisStream, 0L, 1);
   this.m_offset = tell(this.m_vorbisStream);
   final long end = this.getPreviousPage(page);
   if (page.serialno() != serialno) {
     if (this.bisectForwardSerialno(0L, 0L, end + 1L, serialno, 0) < 0) {
       return false;
     }
   } else if (this.bisectForwardSerialno(0L, end, end + 1L, serialno, 0) < 0) {
     return false;
   }
   this.prefetchAllHeaders(initialInfo, initialComment, dataOffset);
   this.rawSeek(this.m_dataOffsets[0]);
   return true;
 }
Example #6

 private int processPacket(final boolean readPage) {
   while (true) {
     if (this.m_decodeReady) {
       final int result = this.m_oggStreamState.packetout(this.m_oggPacket);
       if (result > 0) {
         long granulepos = this.m_oggPacket.granulepos;
         if (this.m_block.synthesis(this.m_oggPacket) == 0) {
           this.m_dspState.synthesis_blockin(this.m_block);
           if (granulepos != -1L) {
             final int link = this.m_vorbisStream.isSeekable() ? this.m_currentLink : 0;
             final int samples = this.m_dspState.synthesis_pcmout(null, null);
             granulepos = Math.max(0L, granulepos - samples);
             for (int i = 0; i < link; ++i) {
               granulepos += this.m_pcmLengths[i];
             }
             this.m_pcmOffset = granulepos;
           }
           return 1;
         }
       }
     }
     if (!readPage) {
       return 0;
     }
     if (this.getNextPage(this.m_oggPage, -1L) < 0) {
       return 0;
     }
     if (this.m_decodeReady && this.m_currentSerialno != this.m_oggPage.serialno()) {
       this.decodeClear();
     }
     if (!this.m_decodeReady) {
       if (this.m_vorbisStream.isSeekable()) {
         this.m_currentSerialno = this.m_oggPage.serialno();
         int j;
         for (j = 0; j < this.m_links && this.m_serialnos[j] != this.m_currentSerialno; ++j) {}
         if (j == this.m_links) {
           return -1;
         }
         this.m_currentLink = j;
         this.m_oggStreamState.init(this.m_currentSerialno);
         this.m_oggStreamState.reset();
       } else {
         final int[] serialnos = {0};
         final int ret =
             this.fetchHeaders(this.m_info[0], this.m_comments[0], serialnos, this.m_oggPage);
         this.m_currentSerialno = serialnos[0];
         if (ret != 0) {
           return ret;
         }
         ++this.m_currentLink;
       }
       this.makeDecodeReady();
     }
     this.m_oggStreamState.pagein(this.m_oggPage);
   }
 }
Example #7

 @Override
 public int read(final ByteBuffer bb, final int pos) {
   this.m_isReset = false;
   bb.position(pos);
   while (bb.remaining() > 0) {
     boolean needToProcessPacket = true;
     if (this.m_decodeReady) {
       final Info info = this.m_info[this.m_currentLink];
       final int totalSamples = this.m_dspState.synthesis_pcmout(this.m_pcmf_buffer, this._index);
       final float[][] pcm = this.m_pcmf_buffer[0];
       if (totalSamples > 0) {
         final int channelsCount = info.channelsCount;
         final int bytesPerSample = channelsCount * 2;
         int samples = Math.min(totalSamples, bb.remaining() / bytesPerSample);
         samples = Math.min(samples, 8192 / bytesPerSample);
         needToProcessPacket = (samples == totalSamples);
         for (int i = 0; i < channelsCount; ++i) {
           int ptr = i * 2;
           final int mono = this._index[i];
           for (int j = 0; j < samples; ++j) {
             int val = (int) (pcm[i][mono + j] * 32767.0f);
             if (val > 32767) {
               val = 32767;
             }
             if (val < -32768) {
               val = -32768;
             }
             if (val < 0) {
               val |= 0x8000;
             }
             if (JOrbisStream.m_bigEndian) {
               this.m_conversionBuffer[ptr] = (byte) (val >>> 8 & 0xFF);
               this.m_conversionBuffer[ptr + 1] = (byte) (val & 0xFF);
             } else {
               this.m_conversionBuffer[ptr] = (byte) (val & 0xFF);
               this.m_conversionBuffer[ptr + 1] = (byte) (val >>> 8 & 0xFF);
             }
             ptr += 2 * channelsCount;
           }
         }
         final int writtenBytesLength = samples * bytesPerSample;
         bb.put(this.m_conversionBuffer, 0, writtenBytesLength);
         this.m_dspState.synthesis_read(samples);
         this.m_pcmOffset += samples;
       }
     }
     if (needToProcessPacket) {
        // no more packets (0) or a stream error (-1): return the bytes
        // written so far, negated
        switch (this.processPacket(true)) {
          case 0:
          case -1:
            return -(bb.position() - pos);
          default:
            continue;
        }
     }
   }
   return bb.position() - pos;
 }
Example #8
  int pack(Buffer opb) {
    int i;
    boolean ordered = false;

    opb.write(0x564342, 24);
    opb.write(dim, 16);
    opb.write(entries, 24);

    // pack the codewords.  There are two packings; length ordered and
    // length random.  Decide between the two now.

    for (i = 1; i < entries; i++) {
      if (lengthlist[i] < lengthlist[i - 1]) break;
    }
    if (i == entries) ordered = true;

    if (ordered) {
      // length ordered.  We only need to say how many codewords of
      // each length.  The actual codewords are generated
      // deterministically

      int count = 0;
      opb.write(1, 1); // ordered
      opb.write(lengthlist[0] - 1, 5); // 1 to 32

      for (i = 1; i < entries; i++) {
        int _this = lengthlist[i];
        int _last = lengthlist[i - 1];
        if (_this > _last) {
          for (int j = _last; j < _this; j++) {
            opb.write(i - count, ilog(entries - count));
            count = i;
          }
        }
      }
      opb.write(i - count, ilog(entries - count));
    } else {
      // length random.  Again, we don't code the codeword itself, just
      // the length.  This time, though, we have to encode each length
      opb.write(0, 1); // unordered

      // algorithmic mapping has a use for 'unused entries', which we tag
      // here.  The algorithmic mapping happens as usual, but the unused
      // entry has no codeword.
      for (i = 0; i < entries; i++) {
        if (lengthlist[i] == 0) break;
      }

      if (i == entries) {
        opb.write(0, 1); // no unused entries
        for (i = 0; i < entries; i++) {
          opb.write(lengthlist[i] - 1, 5);
        }
      } else {
        opb.write(1, 1); // we have unused entries; thus we tag
        for (i = 0; i < entries; i++) {
          if (lengthlist[i] == 0) {
            opb.write(0, 1);
          } else {
            opb.write(1, 1);
            opb.write(lengthlist[i] - 1, 5);
          }
        }
      }
    }

    // is the entry number the desired return value, or do we have a
    // mapping? If we have a mapping, what type?
    opb.write(maptype, 4);
    switch (maptype) {
      case 0:
        // no mapping
        break;
      case 1:
      case 2:
        // implicitly populated value mapping
        // explicitly populated value mapping
        if (quantlist == null) {
          // no quantlist?  error
          return (-1);
        }

        // values that define the dequantization
        opb.write(q_min, 32);
        opb.write(q_delta, 32);
        opb.write(q_quant - 1, 4);
        opb.write(q_sequencep, 1);

        {
          int quantvals = 0;
          switch (maptype) {
            case 1:
              // a single column of (entries/dim) quantized values for
              // building a full value list algorithmically (square lattice)
              quantvals = maptype1_quantvals();
              break;
            case 2:
              // every value (entries*dim total) specified explicitly
              quantvals = entries * dim;
              break;
          }

          // quantized values
          for (i = 0; i < quantvals; i++) {
            opb.write(Math.abs(quantlist[i]), q_quant);
          }
        }
        break;
      default:
        // error case; we don't have any other map types now
        return (-1);
    }
    return (0);
  }
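A rough illustration of the ordered branch (hypothetical helper that prints instead of bit-packing): for lengthlist = {1, 1, 2, 3, 3} it would emit lengthlist[0]-1 = 0 in 5 bits, then the per-length counts 2, 1 and 2 (codewords of length 1, 2 and 3 respectively), each in ilog(entries - count) bits.

  // sketch: trace what the length-ordered packing above would write
  static void traceOrderedPack(int[] lengthlist) {
    int entries = lengthlist.length;
    int count = 0;
    System.out.println((lengthlist[0] - 1) + " in 5 bits");
    int i;
    for (i = 1; i < entries; i++) {
      for (int j = lengthlist[i - 1]; j < lengthlist[i]; j++) {
        int bits = 32 - Integer.numberOfLeadingZeros(entries - count); // ilog
        System.out.println((i - count) + " in " + bits + " bits");
        count = i;
      }
    }
    int bits = 32 - Integer.numberOfLeadingZeros(entries - count);
    System.out.println((i - count) + " in " + bits + " bits");
  }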
Example #9
 static float ldexp(float foo, int e) {
   return (float) (foo * Math.pow(2, e));
 }
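Under this definition ldexp scales by a power of two, mirroring C's ldexp(); the float32 pack/unpack helpers use it to apply an unbiased exponent to the integer mantissa. For example:

 // ldexp(1.5f, 4)  -> 24.0f   (1.5 * 2^4)
 // ldexp(3.0f, -1) -> 1.5f    (3.0 * 2^-1)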