protected void handleNotProvidedFromContentCase() {
  // No charset was provided or detected from the content, so fall back to the
  // non-content-based default rules. The spec default is intentionally null
  // here; the detector-based lookup is currently disabled (see the
  // commented-out line below, kept for reference).
  // specDefault = getEncodingDetector().getSpecDefaultEncoding();
  final String specDefault = null;
  final String defaultCharset = NonContentBasedEncodingRules.useDefaultNameRules(specDefault);
  // Some charset must always be resolvable; a null here is a program error.
  Assert.isNotNull(defaultCharset, PROGRAM_ERROR__FAILED_TO_FIND_ANY_CHARSET_ANYWHERE_);
  fCurrentEncodingMemento = CodedIO.createEncodingMemento(defaultCharset);
}
  /**
   * Returns the encoding memento determined from the input's contents, computing and caching it
   * in {@code fEncodingMemento} on first use.
   *
   * <p>If no memento has been computed yet, this requires a client-supplied reader: the reader is
   * reset and handed to the platform content-type manager for description-based detection. When no
   * content description is available (or the platform throws the known NPE — see workaround
   * below), a plain UTF-8 memento is used as the fallback.
   *
   * @return the cached or newly detected {@code EncodingMemento}; never null
   * @throws CoreException propagated from the content-type manager
   * @throws IOException if resetting or reading the client-supplied reader fails
   * @throws IllegalStateException if no memento is cached and no client reader was supplied
   */
  private EncodingMemento checkForEncodingInContents() throws CoreException, IOException {
    EncodingMemento result = null;

    // If the encoding memento has already been determined, reuse it rather
    // than running detection again.
    if (fEncodingMemento != null) {
      result = fEncodingMemento;
    } else {
      if (fClientSuppliedReader) {
        // Rewind so content-type detection reads from the start of the input.
        fReader.reset();
        IContentTypeManager contentTypeManager = Platform.getContentTypeManager();
        try {
          IContentDescription contentDescription =
              contentTypeManager.getDescriptionFor(fReader, fFilename, IContentDescription.ALL);
          if (contentDescription != null) {
            fEncodingMemento = createMemento(contentDescription);
          } else {
            // No content description available: fall back to a UTF-8 memento.
            fEncodingMemento = CodedIO.createEncodingMemento("UTF-8"); // $NON-NLS-1$
          }
        } catch (NullPointerException e) {
          // TODO: workaround for the 5/14 bug in the base platform; should be
          // removed when moving up to the 5/21 build. Until then, swallow the
          // NPE and just create a simple default memento.
          fEncodingMemento = CodedIO.createEncodingMemento("UTF-8"); // $NON-NLS-1$
        }
        result = fEncodingMemento;
      } else {
        throw new IllegalStateException(
            "unexpected state: encodingMemento was null but no input stream supplied"); //$NON-NLS-1$
      }
    }
    //		try {
    //			result = getEncodingDetector().getEncodingMemento();
    //			if (result != null && !result.isValid() && !forceDefault()) {
    //				throw new UnsupportedCharsetExceptionWithDetail(result);
    //			}
    //		}
    //		finally {
    //			handleStreamClose(fEncodingDetectorStream);
    //		}
    return result;
  }
  /*
   * This method is called only when the encoding is not detected in the file.
   *
   * Here is the encoding lookup order we will try: - try the resource content
   * description (Eclipse Text file encoding) - try the resource content
   * properties (for JSP only) - try the content type encoding preferences (for
   * HTML only) - try the resource content description (Eclipse Text file
   * encoding, implicit check)
   *
   * Note: This method appears in both CodedReaderCreator and
   * CodedStreamCreator (with just a minor difference). They should be kept
   * the same.
   */
  private EncodingMemento getEncodingMementoFromResourceAndPreference()
      throws IOException, CoreException {
    // Without an IFile there is nothing to consult; fall back to whatever
    // memento (possibly null) has already been recorded on this instance.
    if (fIFile == null) {
      return fEncodingMemento;
    }
    // Follow the Eclipse Platform's direction: ask the IFile for its charset
    // and wrap it in a fresh memento.
    return CodedIO.createEncodingMemento(fIFile.getCharset());
  }
  /**
   * Encodes the contents of the resettable reader onto {@code outputStream} using the charset
   * selected by {@code fCurrentEncodingMemento} and the given {@code encodingRule}, writing an
   * explicit Unicode BOM first where appropriate.
   *
   * @param outputStream destination for the encoded bytes; this method flushes it but never
   *     closes it — the caller owns the stream
   * @param encodingRule CONTENT_BASED (throw if the detected charset is invalid),
   *     IGNORE_CONVERSION_ERROR (replace malformed/unmappable characters instead of reporting),
   *     or FORCE_DEFAULT (use the memento's appropriate default charset)
   * @param use3ByteBOMifUTF8 if true, always write the 3-byte BOM when the charset is UTF-8
   * @throws CoreException propagated from computing the current encoding memento
   * @throws IOException on read/write failure
   * @throws UnsupportedCharsetExceptionWithDetail if CONTENT_BASED and the memento is invalid
   */
  private void dump(OutputStream outputStream, EncodingRule encodingRule, boolean use3ByteBOMifUTF8)
      throws CoreException, IOException {
    // Ensure fCurrentEncodingMemento is populated before it is consulted below.
    getCurrentEncodingMemento();
    String javaEncodingName = null;
    if (encodingRule == EncodingRule.CONTENT_BASED) {
      if (fCurrentEncodingMemento.isValid()) {
        javaEncodingName = fCurrentEncodingMemento.getJavaCharsetName();
      } else {
        throw new UnsupportedCharsetExceptionWithDetail(fCurrentEncodingMemento);
      }
    } else if (encodingRule == EncodingRule.IGNORE_CONVERSION_ERROR)
      javaEncodingName = fCurrentEncodingMemento.getJavaCharsetName();
    else if (encodingRule == EncodingRule.FORCE_DEFAULT)
      javaEncodingName = fCurrentEncodingMemento.getAppropriateDefault();
    // Write the appropriate "header" Unicode BOM bytes explicitly.
    // Note: Java seems to write an appropriate header for UTF-16, but not for
    // UTF-8 nor UTF-16BE. This may vary by JRE version, so it needs testing.
    // Note: javaEncodingName can be null in invalid cases, so the whole BOM
    // check is skipped when that is the case.
    if (javaEncodingName != null) {
      if ((javaEncodingName.equals(UTF_8_CHARSET_NAME) && use3ByteBOMifUTF8)
          || (javaEncodingName.equals(UTF_8_CHARSET_NAME)
              && fCurrentEncodingMemento.isUTF83ByteBOMUsed())) {
        outputStream.write(UTF3BYTEBOM);
      } else if (javaEncodingName.equals(UTF_16LE_CHARSET_NAME)) {
        outputStream.write(UTF16LEBOM);
      } else if (javaEncodingName.equals(UTF_16BE_CHARSET_NAME)) {
        outputStream.write(UTF16BEBOM);
      }
    }
    // TODO add back in line delimiter handling the
    // "right" way (updating
    // markers, not requiring string, etc. .. may need
    // to move to document
    // level)
    // allTextBuffer =
    // handleLineDelimiter(allTextBuffer, document);
    Reader reader = getResettableReader();
    // Be sure to test large "readers" ... all of them need to be able to reset
    // to the initial position (StringReader, CharArrayReader, and
    // DocumentReader should all work ok).
    reader.reset();
    // There must be cleaner logic somehow, but the idea is that
    // javaEncodingName can be null if the originally detected encoding is not
    // valid (and FORCE_DEFAULT was not specified). Hence, we WANT the first
    // Charset.forName to throw the appropriate exception.
    Charset charset = null;

    // this call checks the "override" properties file
    javaEncodingName = CodedIO.getAppropriateJavaCharset(javaEncodingName);

    if (javaEncodingName == null) {
      charset = Charset.forName(fCurrentEncodingMemento.getDetectedCharsetName());
    } else {
      charset = Charset.forName(javaEncodingName);
    }
    CharsetEncoder charsetEncoder = charset.newEncoder();
    if (!(encodingRule == EncodingRule.IGNORE_CONVERSION_ERROR)) {
      // Strict mode: surface malformed/unmappable input as exceptions.
      charsetEncoder.onMalformedInput(CodingErrorAction.REPORT);
      charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPORT);
    } else {
      // Lenient mode: silently substitute the replacement character.
      charsetEncoder.onMalformedInput(CodingErrorAction.REPLACE);
      charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPLACE);
    }
    OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream, charsetEncoder);
    // TODO: this may no longer be needed (and is at
    // least wrong spot for
    // it).
    //		if (checkConversion && (!(encodingRule ==
    // EncodingRule.IGNORE_CONVERSION_ERROR))) {
    //			checkConversion(fCurrentEncodingMemento,
    // encodingRule);
    //		}
    char[] charbuf = new char[CodedIO.MAX_BUF_SIZE];
    int nRead = 0;
    try {
      while (nRead != -1) {
        // NOTE(review): unqualified MAX_BUF_SIZE here vs. CodedIO.MAX_BUF_SIZE
        // above — presumably the same constant via inheritance or static
        // import; confirm.
        nRead = reader.read(charbuf, 0, MAX_BUF_SIZE);
        if (nRead > 0) {
          // NOTE(review): flushing before each write (rather than after)
          // looks unusual — possibly deliberate so an encoding failure in
          // write() leaves all prior chunks flushed; confirm before changing.
          outputStreamWriter.flush();
          outputStreamWriter.write(charbuf, 0, nRead);
        }
      }
    } catch (UnmappableCharacterException e) {
      // Delegate to checkConversion, which presumably throws a more detailed
      // exception for the caller — TODO confirm; as written the original
      // exception is not rethrown here.
      checkConversion(fCurrentEncodingMemento, encodingRule);
    } finally {
      // since we don't own the original output stream, we
      // won't close it ours.
      // the caller who passed it to us must close original one
      // when appropriate.
      // (but we do flush to be sure all up-to-date)
      outputStreamWriter.flush();
    }
  }