private ChannelFuture sendFile(ChannelHandlerContext ctx, Channel ch, FileChunk file)
    throws IOException {
  RandomAccessFile raf;
  try {
    raf = new RandomAccessFile(file.getFile(), "r");
  } catch (FileNotFoundException fnfe) {
    return null;
  }

  ChannelFuture writeFuture;
  if (ch.getPipeline().get(SslHandler.class) != null) {
    // Cannot use zero-copy with HTTPS.
    writeFuture = ch.write(new ChunkedFile(raf, file.startOffset(), file.length(), 8192));
  } else {
    // No encryption - use zero-copy.
    final FileRegion region =
        new DefaultFileRegion(raf.getChannel(), file.startOffset(), file.length());
    writeFuture = ch.write(region);
    writeFuture.addListener(new ChannelFutureListener() {
      public void operationComplete(ChannelFuture future) {
        region.releaseExternalResources();
      }
    });
  }

  return writeFuture;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
  Channel ch = e.getChannel();
  Throwable cause = e.getCause();
  if (cause instanceof TooLongFrameException) {
    sendError(ctx, BAD_REQUEST);
    return;
  }

  cause.printStackTrace();
  if (ch.isConnected()) {
    sendError(ctx, INTERNAL_SERVER_ERROR);
  }
}
private ChannelFuture sendFile(ChannelHandlerContext ctx, Channel ch, FileChunk file)
    throws IOException {
  RandomAccessFile spill;
  try {
    spill = new RandomAccessFile(file.getFile(), "r");
  } catch (FileNotFoundException e) {
    LOG.info(file.getFile() + " not found");
    return null;
  }

  ChannelFuture writeFuture;
  if (ch.getPipeline().get(SslHandler.class) == null) {
    // No SSL handler in the pipeline - serve the spill with zero-copy transfer.
    final FadvisedFileRegion partition = new FadvisedFileRegion(spill,
        file.startOffset(), file.length(), manageOsCache, readaheadLength,
        readaheadPool, file.getFile().getAbsolutePath());
    writeFuture = ch.write(partition);
    writeFuture.addListener(new ChannelFutureListener() {
      // TODO error handling; distinguish IO/connection failures,
      // attribute to appropriate spill output
      @Override
      public void operationComplete(ChannelFuture future) {
        partition.releaseExternalResources();
      }
    });
  } else {
    // HTTPS cannot be done with zero copy.
    final FadvisedChunkedFile chunk = new FadvisedChunkedFile(spill,
        file.startOffset(), file.length(), sslFileBufferSize, manageOsCache,
        readaheadLength, readaheadPool, file.getFile().getAbsolutePath());
    writeFuture = ch.write(chunk);
  }

  metrics.shuffleConnections.incr();
  metrics.shuffleOutputBytes.incr(file.length()); // optimistic
  return writeFuture;
}
// TODO change AbstractService to throw InterruptedException
@Override
public synchronized void start() {
  Configuration conf = getConfig();
  ServerBootstrap bootstrap = new ServerBootstrap(selector);
  try {
    pipelineFact = new HttpPipelineFactory(conf);
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
  bootstrap.setPipelineFactory(pipelineFact);

  port = conf.getInt(ConfVars.PULLSERVER_PORT.varname,
      ConfVars.PULLSERVER_PORT.defaultIntVal);
  Channel ch = bootstrap.bind(new InetSocketAddress(port));
  accepted.add(ch);

  // Record the port actually bound and publish it back into the configuration.
  port = ((InetSocketAddress) ch.getLocalAddress()).getPort();
  conf.set(ConfVars.PULLSERVER_PORT.varname, Integer.toString(port));
  pipelineFact.PullServer.setPort(port);
  LOG.info(getName() + " listening on port " + port);
  super.start();

  sslFileBufferSize = conf.getInt(SUFFLE_SSL_FILE_BUFFER_SIZE_KEY,
      DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE);
}
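The HttpPipelineFactory constructed in start() is not shown in this section. As a rough sketch, a Netty 3.x pipeline factory for this kind of HTTP data server might be wired as follows; the class name, constructor, and handler field here are illustrative assumptions, not the actual Tajo implementation.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
import org.jboss.netty.handler.stream.ChunkedWriteHandler;

// Minimal sketch of a pipeline factory for an HTTP data server. The real
// HttpPipelineFactory also reads its Configuration (and possibly SSL settings),
// which is omitted here.
public class SketchHttpPipelineFactory implements ChannelPipelineFactory {
  private final SimpleChannelUpstreamHandler requestHandler; // hypothetical handler instance

  public SketchHttpPipelineFactory(SimpleChannelUpstreamHandler requestHandler) {
    this.requestHandler = requestHandler;
  }

  @Override
  public ChannelPipeline getPipeline() throws Exception {
    ChannelPipeline pipeline = Channels.pipeline();
    pipeline.addLast("decoder", new HttpRequestDecoder());            // parse HTTP requests
    pipeline.addLast("aggregator", new HttpChunkAggregator(1 << 16)); // assemble chunked requests
    pipeline.addLast("encoder", new HttpResponseEncoder());           // serialize HTTP responses
    pipeline.addLast("chunking", new ChunkedWriteHandler());          // required for ChunkedFile writes
    pipeline.addLast("shuffle", requestHandler);                      // the messageReceived() handler below
    return pipeline;
  }
}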
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
  HttpRequest request = (HttpRequest) e.getMessage();
  if (request.getMethod() != GET) {
    sendError(ctx, METHOD_NOT_ALLOWED);
    return;
  }

  // Parsing the URL into key-values
  final Map<String, List<String>> params =
      new QueryStringDecoder(request.getUri()).getParameters();
  final List<String> types = params.get("type");
  final List<String> taskIdList = params.get("ta");
  final List<String> subQueryIds = params.get("sid");
  final List<String> partitionIds = params.get("p");

  if (types == null || taskIdList == null || subQueryIds == null || partitionIds == null) {
    sendError(ctx, "Required type, taskIds, subquery Id, and partition id", BAD_REQUEST);
    return;
  }

  if (types.size() != 1 || subQueryIds.size() != 1) {
    sendError(ctx, "Required type, taskIds, subquery Id, and partition id", BAD_REQUEST);
    return;
  }

  final List<FileChunk> chunks = Lists.newArrayList();

  String repartitionType = types.get(0);
  String sid = subQueryIds.get(0);
  String partitionId = partitionIds.get(0);
  List<String> taskIds = splitMaps(taskIdList);

  // the working dir of tajo worker for each query
  String queryBaseDir = queryId + "/output" + "/";

  LOG.info("PullServer request param: repartitionType=" + repartitionType
      + ", sid=" + sid + ", partitionId=" + partitionId + ", taskIds=" + taskIdList);

  String taskLocalDir = conf.get(ConfVars.WORKER_TEMPORAL_DIR.varname);
  if (taskLocalDir == null || taskLocalDir.equals("")) {
    LOG.error("Tajo local directory should be specified.");
  }
  LOG.info("PullServer baseDir: " + taskLocalDir + "/" + queryBaseDir);

  // if a subquery requires a range partitioning
  if (repartitionType.equals("r")) {
    String ta = taskIds.get(0);
    Path path = localFS.makeQualified(lDirAlloc.getLocalPathToRead(
        queryBaseDir + "/" + sid + "/" + ta + "/output/", conf));

    String startKey = params.get("start").get(0);
    String endKey = params.get("end").get(0);
    boolean last = params.get("final") != null;

    FileChunk chunk;
    try {
      chunk = getFileCunks(path, startKey, endKey, last);
    } catch (Throwable t) {
      LOG.error("ERROR Request: " + request.getUri(), t);
      sendError(ctx, "Cannot get file chunks to be sent", BAD_REQUEST);
      return;
    }
    if (chunk != null) {
      chunks.add(chunk);
    }

  // if a subquery requires a hash repartition
  } else if (repartitionType.equals("h")) {
    for (String ta : taskIds) {
      Path path = localFS.makeQualified(lDirAlloc.getLocalPathToRead(
          queryBaseDir + "/" + sid + "/" + ta + "/output/" + partitionId, conf));
      File file = new File(path.toUri());
      FileChunk chunk = new FileChunk(file, 0, file.length());
      chunks.add(chunk);
    }
  } else {
    LOG.error("Unknown repartition type: " + repartitionType);
    return;
  }

  // Write the content.
  Channel ch = e.getChannel();
  if (chunks.size() == 0) {
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, NO_CONTENT);
    ch.write(response);
    if (!isKeepAlive(request)) {
      ch.close();
    }
  } else {
    FileChunk[] file = chunks.toArray(new FileChunk[chunks.size()]);
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    long totalSize = 0;
    for (FileChunk chunk : file) {
      totalSize += chunk.length();
    }
    setContentLength(response, totalSize);

    // Write the initial line and the header.
    ch.write(response);

    ChannelFuture writeFuture = null;
    for (FileChunk chunk : file) {
      writeFuture = sendFile(ctx, ch, chunk);
      if (writeFuture == null) {
        sendError(ctx, NOT_FOUND);
        return;
      }
    }

    // Decide whether to close the connection or not.
    if (!isKeepAlive(request)) {
      // Close the connection when the whole content is written out.
      writeFuture.addListener(ChannelFutureListener.CLOSE);
    }
  }
}
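To make the query-string handling above concrete, a pull request URI carries type (r for range, h for hash), sid, p, and a comma-separated ta list (plus start, end, and final for range shuffles). The following is a minimal sketch of decoding such a URI with Netty's QueryStringDecoder; the id values are made up for illustration, and splitMaps() is presumably what splits the comma-separated task ids.

import java.util.List;
import java.util.Map;
import org.jboss.netty.handler.codec.http.QueryStringDecoder;

public class PullRequestUriExample {
  public static void main(String[] args) {
    // Hypothetical URI in the shape the handler above expects: a hash
    // repartition ("h") pull for subquery 2, partition 5, tasks 0 and 1.
    String uri = "/?type=h&sid=2&p=5&ta=0,1";

    Map<String, List<String>> params = new QueryStringDecoder(uri).getParameters();
    System.out.println("type = " + params.get("type")); // [h]
    System.out.println("sid  = " + params.get("sid"));  // [2]
    System.out.println("p    = " + params.get("p"));    // [5]
    System.out.println("ta   = " + params.get("ta"));   // [0,1]
  }
}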
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
  HttpRequest request = (HttpRequest) e.getMessage();
  if (request.getMethod() != GET) {
    sendError(ctx, METHOD_NOT_ALLOWED);
    return;
  }

  String base = ContainerLocalizer.USERCACHE + "/" + userName + "/"
      + ContainerLocalizer.APPCACHE + "/" + appId + "/output" + "/";

  final Map<String, List<String>> params =
      new QueryStringDecoder(request.getUri()).getParameters();

  List<FileChunk> chunks = Lists.newArrayList();
  List<String> taskIds = splitMaps(params.get("ta"));
  int sid = Integer.valueOf(params.get("sid").get(0));
  int partitionId = Integer.valueOf(params.get("p").get(0));
  for (String ta : taskIds) {
    File file = new File(base + "/" + sid + "/" + ta + "/output/" + partitionId);
    FileChunk chunk = new FileChunk(file, 0, file.length());
    chunks.add(chunk);
  }
  FileChunk[] file = chunks.toArray(new FileChunk[chunks.size()]);

  // try {
  //   file = retriever.handle(ctx, request);
  // } catch (FileNotFoundException fnf) {
  //   LOG.error(fnf);
  //   sendError(ctx, NOT_FOUND);
  //   return;
  // } catch (IllegalArgumentException iae) {
  //   LOG.error(iae);
  //   sendError(ctx, BAD_REQUEST);
  //   return;
  // } catch (FileAccessForbiddenException fafe) {
  //   LOG.error(fafe);
  //   sendError(ctx, FORBIDDEN);
  //   return;
  // } catch (IOException ioe) {
  //   LOG.error(ioe);
  //   sendError(ctx, INTERNAL_SERVER_ERROR);
  //   return;
  // }

  // Write the content.
  Channel ch = e.getChannel();
  if (file == null) {
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, NO_CONTENT);
    ch.write(response);
    if (!isKeepAlive(request)) {
      ch.close();
    }
  } else {
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    long totalSize = 0;
    for (FileChunk chunk : file) {
      totalSize += chunk.length();
    }
    setContentLength(response, totalSize);

    // Write the initial line and the header.
    ch.write(response);

    ChannelFuture writeFuture = null;
    for (FileChunk chunk : file) {
      writeFuture = sendFile(ctx, ch, chunk);
      if (writeFuture == null) {
        sendError(ctx, NOT_FOUND);
        return;
      }
    }

    // Decide whether to close the connection or not.
    if (!isKeepAlive(request)) {
      // Close the connection when the whole content is written out.
      writeFuture.addListener(ChannelFutureListener.CLOSE);
    }
  }
}