/** Read bytes from the previous buffer. */
protected int readBytes() throws IOException {
    int nRead = buffer.doRead(readChunk, null);
    pos = readChunk.getStart();
    lastValid = pos + nRead;
    buf = readChunk.getBytes();
    return nRead;
}
/**
 * Read bytes.
 *
 * @param chunk the destination {@link ByteChunk}
 * @param req the current {@link Request}
 * @return If the filter does request length control, this value is significant; it should be
 *         the number of bytes consumed from the buffer, up to the end of the current request
 *         body or the buffer length, whichever is smaller. If the filter does not do request
 *         body length control, the returned value should be -1.
 */
public int doRead(ByteChunk chunk, Request req) throws IOException {

    if (endChunk) {
        return -1;
    }

    if (needCRLFParse) {
        needCRLFParse = false;
        // Parse the CRLF that was deferred from the previous call
        parseCRLF();
    }

    if (remaining <= 0) {
        if (!parseChunkHeader()) {
            throw new IOException("Invalid chunk header");
        }
        if (endChunk) {
            parseEndChunk();
            return -1;
        }
    }

    int result = 0;

    if (pos >= lastValid) {
        readBytes();
    }

    if (remaining > (lastValid - pos)) {
        // Only part of the current chunk is in the buffer
        result = lastValid - pos;
        remaining = remaining - result;
        chunk.setBytes(buf, pos, result);
        pos = lastValid;
    } else {
        // The whole remainder of the chunk is available
        result = remaining;
        chunk.setBytes(buf, pos, remaining);
        pos = pos + remaining;
        remaining = 0;
        // We need a CRLF
        if ((pos + 1) >= lastValid) {
            // If we call parseCRLF we overrun the buffer here,
            // so we defer it to the next call (BZ 11117)
            needCRLFParse = true;
        } else {
            // Parse the CRLF immediately
            parseCRLF();
        }
    }

    return result;
}
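// A minimal caller sketch (illustrative only: "ChunkedInputFilter" is assumed to be the
// class these methods belong to, and "drainBody" is a hypothetical helper, not part of
// the code above). It shows how doRead() is typically driven in a loop until it returns
// -1, which signals that the terminating zero-size chunk has been parsed.
static long drainBody(ChunkedInputFilter filter, Request request) throws IOException {
    ByteChunk body = new ByteChunk();
    long total = 0;
    int n;
    while ((n = filter.doRead(body, request)) >= 0) {
        // body now references n decoded bytes starting at body.getStart();
        // a real caller would consume them here before the next doRead() call
        total += n;
    }
    return total;
}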
/** Make the filter ready to process the next request. */
public void recycle() {
    remaining = 0;
    pos = 0;
    lastValid = 0;
    endChunk = false;
    needCRLFParse = false;
    trailingHeaders.recycle();
}
/**
 * Customize the error page.
 *
 * @param req The {@link Request} object
 * @param res The {@link Response} object
 * @throws Exception if the error page cannot be written
 */
protected void customizedErrorPage(Request req, Response res) throws Exception {

    // With Grizzly, we just return a 404 with a simple error message.
    res.setMessage("Not Found");
    res.setStatus(404);

    ByteBuffer bb = HtmlHelper.getErrorPage("Not Found",
            "HTTP/1.1 404 Not Found\r\n", "Grizzly");
    res.setContentLength(bb.limit());
    res.setContentType("text/html");
    res.flushHeaders();

    if (res.getChannel() != null) {
        res.getChannel().write(bb);
        // Mark the request so that the usual after-service processing is skipped
        req.setNote(14, "SkipAfterService");
    } else {
        byte b[] = new byte[bb.limit()];
        bb.get(b);
        ByteChunk chunk = new ByteChunk();
        chunk.setBytes(b, 0, b.length);
        res.doWrite(chunk);
    }
}
/**
 * Look up a resource based on the request URI, and send it using send file.
 *
 * @param uri The request URI
 * @param req the {@link Request}
 * @param res the {@link Response}
 * @throws Exception if an error occurs while serving the resource
 */
protected void service(String uri, Request req, final Response res) throws Exception {
    FileInputStream fis = null;
    try {
        initWebDir();

        boolean found = false;
        File resource = null;

        for (File webDir : fileFolders) {
            // Local file
            resource = cache.get(uri);
            if (resource == null) {
                resource = new File(webDir, uri);
                if (resource.exists() && resource.isDirectory()) {
                    // Serve the directory's welcome file, if present
                    final File f = new File(resource, "/index.html");
                    if (f.exists()) {
                        resource = f;
                        found = true;
                        break;
                    }
                }
            }

            if (resource.isDirectory() || !resource.exists()) {
                found = false;
            } else {
                found = true;
                break;
            }
        }

        cache.put(uri, resource);

        if (!found) {
            if (logger.isLoggable(Level.FINE)) {
                logger.log(Level.FINE, "File not found " + resource);
            }
            res.setStatus(404);
            if (commitErrorResponse) {
                customizedErrorPage(req, res);
            }
            return;
        }

        res.setStatus(200);

        // Determine the content type from the file extension
        String substr;
        int dot = uri.lastIndexOf(".");
        if (dot < 0) {
            substr = resource.toString();
            dot = substr.lastIndexOf(".");
        } else {
            substr = uri;
        }
        if (dot > 0) {
            String ext = substr.substring(dot + 1);
            String ct = MimeType.get(ext, defaultContentType);
            if (ct != null) {
                res.setContentType(ct);
            }
        } else {
            res.setContentType(defaultContentType);
        }

        long length = resource.length();
        res.setContentLengthLong(length);

        // Send the headers, and flush the bytes, as we will now move to
        // using send file.
        res.sendHeaders();

        if (req.method().toString().equalsIgnoreCase("HEAD")) {
            return;
        }

        fis = new FileInputStream(resource);
        OutputBuffer outputBuffer = res.getOutputBuffer();

        if (useSendFile && (outputBuffer instanceof FileOutputBuffer)
                && ((FileOutputBuffer) outputBuffer).isSupportFileSend()) {
            res.flush();

            long nWrite = 0;
            while (nWrite < length) {
                nWrite += ((FileOutputBuffer) outputBuffer)
                        .sendFile(fis.getChannel(), nWrite, length - nWrite);
            }
        } else {
            // Fall back to copying the file through the response buffer
            byte b[] = new byte[8192];
            ByteChunk chunk = new ByteChunk();
            int rd;
            while ((rd = fis.read(b)) > 0) {
                chunk.setBytes(b, 0, rd);
                res.doWrite(chunk);
            }
        }
    } finally {
        if (fis != null) {
            try {
                fis.close();
            } catch (IOException ignored) {
            }
        }
    }
}
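// A self-contained sketch of the extension-to-content-type logic used in service()
// above (illustrative only: the class name, the map, and the sample paths are made up,
// and the map stands in for the MimeType.get(ext, defaultContentType) lookup).
import java.util.Map;

public class ExtensionLookupSketch {

    private static final Map<String, String> TYPES = Map.of(
            "html", "text/html",
            "css", "text/css",
            "js", "application/javascript");

    static String contentTypeFor(String uri, String resolvedPath, String defaultContentType) {
        String substr;
        int dot = uri.lastIndexOf('.');
        if (dot < 0) {
            // No extension in the URI (e.g. "/docs" resolved to ".../docs/index.html"),
            // so fall back to the resolved file path
            substr = resolvedPath;
            dot = substr.lastIndexOf('.');
        } else {
            substr = uri;
        }
        if (dot > 0) {
            String ext = substr.substring(dot + 1);
            return TYPES.getOrDefault(ext, defaultContentType);
        }
        return defaultContentType;
    }

    public static void main(String[] args) {
        System.out.println(contentTypeFor("/guide.html", "/www/guide.html", "text/plain"));  // text/html
        System.out.println(contentTypeFor("/docs", "/www/docs/index.html", "text/plain"));   // text/html
    }
}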
static {
    ENCODING.setBytes(ENCODING_NAME.getBytes(), 0, ENCODING_NAME.length());
}
private boolean parseHeader() throws IOException {

    MimeHeaders headers = request.getMimeHeaders();

    byte chr = 0;
    while (true) {

        // Read new bytes if needed
        if (pos >= lastValid) {
            if (readBytes() < 0) {
                throw new EOFException(
                        "Unexpected end of stream whilst reading trailer headers for chunked request");
            }
        }

        chr = buf[pos];
        if ((chr == Constants.CR) || (chr == Constants.LF)) {
            if (chr == Constants.LF) {
                // A blank line terminates the trailer section
                pos++;
                return false;
            }
        } else {
            break;
        }
        pos++;
    }

    // Mark the current buffer position
    int start = trailingHeaders.getEnd();

    //
    // Reading the header name
    // Header name is always US-ASCII
    //

    boolean colon = false;
    while (!colon) {

        // Read new bytes if needed
        if (pos >= lastValid) {
            if (readBytes() < 0) {
                throw new EOFException(
                        "Unexpected end of stream whilst reading trailer headers for chunked request");
            }
        }

        chr = buf[pos];
        if ((chr >= Constants.A) && (chr <= Constants.Z)) {
            chr = (byte) (chr - Constants.LC_OFFSET);
        }

        if (chr == Constants.COLON) {
            colon = true;
        } else {
            trailingHeaders.append(chr);
        }

        pos++;
    }

    MessageBytes headerValue = headers.addValue(trailingHeaders.getBytes(), start,
            trailingHeaders.getEnd() - start);

    // Mark the current buffer position
    start = trailingHeaders.getEnd();

    //
    // Reading the header value (which can be spanned over multiple lines)
    //

    boolean eol = false;
    boolean validLine = true;
    int lastSignificantChar = 0;

    while (validLine) {

        boolean space = true;

        // Skipping spaces
        while (space) {

            // Read new bytes if needed
            if (pos >= lastValid) {
                if (readBytes() < 0) {
                    throw new EOFException(
                            "Unexpected end of stream whilst reading trailer headers for chunked request");
                }
            }

            chr = buf[pos];
            if ((chr == Constants.SP) || (chr == Constants.HT)) {
                pos++;
            } else {
                space = false;
            }
        }

        // Reading bytes until the end of the line
        while (!eol) {

            // Read new bytes if needed
            if (pos >= lastValid) {
                if (readBytes() < 0) {
                    throw new EOFException(
                            "Unexpected end of stream whilst reading trailer headers for chunked request");
                }
            }

            chr = buf[pos];
            if (chr == Constants.CR) {
                // Skip
            } else if (chr == Constants.LF) {
                eol = true;
            } else if (chr == Constants.SP) {
                trailingHeaders.append(chr);
            } else {
                trailingHeaders.append(chr);
                lastSignificantChar = trailingHeaders.getEnd();
            }

            pos++;
        }

        // Checking the first character of the new line. If the character
        // is a LWS, then it's a multiline header

        // Read new bytes if needed
        if (pos >= lastValid) {
            if (readBytes() < 0) {
                throw new EOFException(
                        "Unexpected end of stream whilst reading trailer headers for chunked request");
            }
        }

        chr = buf[pos];
        if ((chr != Constants.SP) && (chr != Constants.HT)) {
            validLine = false;
        } else {
            eol = false;
            // Copying one extra space in the buffer (since there must
            // be at least one space inserted between the lines)
            trailingHeaders.append(chr);
        }
    }

    // Set the header value
    headerValue.setBytes(trailingHeaders.getBytes(), start,
            lastSignificantChar - start);

    return true;
}
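// For reference, a minimal runnable example of the chunk-encoded tail that parseHeader()
// decodes (illustrative only: the trailer name "X-Checksum" and the class name are made
// up; the byte layout itself follows HTTP/1.1 chunked encoding).
public class ChunkedTrailerExample {
    public static void main(String[] args) {
        // After the last data chunk, a zero-size chunk ends the body; each trailer line
        // is parsed by parseHeader() (returning true), and the final blank line ends the
        // trailer section (the case in which parseHeader() returns false).
        String chunkedBody =
                "4\r\n"                     // chunk-size in hex
              + "Wiki\r\n"                  // 4 bytes of chunk data, then CRLF
              + "0\r\n"                     // zero-size chunk: end of the body
              + "X-Checksum: 12345\r\n"     // a trailer header
              + "\r\n";                     // blank line: end of trailers
        System.out.print(chunkedBody);
    }
}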