/**
 * Convert a ClientKeyBlock to a Bucket. If an error occurs, report it via onFailure and return
 * null.
 *
 * @param block The fetched and verified key block to decode.
 * @param token Opaque request token; passed through to onFailure on the error paths.
 * @param container Database container (db4o); only used on the failure paths.
 * @param context Client context supplying the bucket factory.
 * @return The decoded data, or null if decoding failed (the failure has already been reported).
 */
protected Bucket extract(
    ClientKeyBlock block, Object token, ObjectContainer container, ClientContext context) {
  Bucket data;
  try {
    data =
        block.decode(
            context.getBucketFactory(persistent),
            // Clamp the configured (long) max output length into int range for the decoder.
            (int) (Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)),
            false);
  } catch (KeyDecodeException e1) {
    // Block was fetched but cannot be decoded: report as a block decode error.
    if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Decode failure: " + e1, e1);
    onFailure(
        new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()),
        token,
        container,
        context);
    return null;
  } catch (TooBigException e) {
    // Decoded data exceeds the caller-imposed maximum output length.
    onFailure(
        new FetchException(FetchException.TOO_BIG, e.getMessage()), token, container, context);
    return null;
  } catch (IOException e) {
    // Could not store the decoded data, e.g. disk full.
    Logger.error(this, "Could not capture data - disk full?: " + e, e);
    onFailure(new FetchException(FetchException.BUCKET_ERROR, e), token, container, context);
    return null;
  }
  if (Logger.shouldLog(Logger.MINOR, this))
    Logger.minor(
        this, data == null ? "Could not decode: null" : ("Decoded " + data.size() + " bytes"));
  return data;
}
// Real onFailure protected void onFailure( FetchException e, Object token, ObjectContainer container, ClientContext context) { if (persistent) { container.activate(segment, 1); container.activate(parent, 1); container.activate(segment.errors, 1); } boolean forceFatal = false; if (parent.isCancelled()) { if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Failing: cancelled"); e = new FetchException(FetchException.CANCELLED); forceFatal = true; } segment.errors.inc(e.getMode()); if (e.isFatal() && token == null) { segment.fail(e, container, context, false); } else if (e.isFatal() || forceFatal) { segment.onFatalFailure(e, ((MySendableRequestItem) token).x, this, container, context); } else { segment.onNonFatalFailure(e, ((MySendableRequestItem) token).x, this, container, context); } removeBlockNum(((MySendableRequestItem) token).x, container, false); if (persistent) { container.deactivate(segment, 1); container.deactivate(parent, 1); container.deactivate(segment.errors, 1); } }
/**
 * Write the contents of an InputStream to the target file as atomically as possible: the data
 * is first copied to a temporary file in the target's directory, which is then renamed over
 * the target.
 *
 * @param input Stream to copy; read to EOF and closed before this method returns.
 * @param target Destination file.
 * @return True if the final rename succeeded; false otherwise (the temp file is deleted).
 * @throws IOException If reading or writing fails; the temporary file is deleted first.
 */
public static boolean writeTo(InputStream input, File target)
    throws FileNotFoundException, IOException {
  DataInputStream dis = null;
  FileOutputStream fos = null;
  File file = File.createTempFile("temp", ".tmp", target.getParentFile());
  if (Logger.shouldLog(Logger.MINOR, FileUtil.class))
    Logger.minor(FileUtil.class, "Writing to " + file + " to be renamed to " + target);
  boolean copied = false;
  try {
    dis = new DataInputStream(input);
    fos = new FileOutputStream(file);
    int len = 0;
    byte[] buffer = new byte[4096];
    while ((len = dis.read(buffer)) > 0) {
      fos.write(buffer, 0, len);
    }
    copied = true;
  } finally {
    // Close both streams even if the first close() throws; previously a failure closing
    // the input stream would leak the FileOutputStream (and its file descriptor).
    try {
      if (dis != null) dis.close();
    } finally {
      if (fos != null) fos.close();
    }
    // Don't leave the temp file behind if the copy failed.
    if (!copied) file.delete();
  }
  if (FileUtil.renameTo(file, target)) return true;
  else {
    file.delete();
    return false;
  }
}
/**
 * Creates the non-persistent (in-memory) core of a client request scheduler.
 *
 * @param sched The owning scheduler.
 * @param forInserts True if this core schedules inserts, false for fetches.
 * @param forSSKs True if this core handles SSKs rather than CHKs.
 * @param random Randomness source passed to the superclass.
 */
ClientRequestSchedulerNonPersistent(
    ClientRequestScheduler sched, boolean forInserts, boolean forSSKs, RandomSource random) {
  super(forInserts, forSSKs, random);
  this.sched = sched;
  // Recent-success tracking only applies to fetches, never to inserts.
  recentSuccesses = forInserts ? null : new LinkedList<BaseSendableGet>();
  logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
}
/**
 * Creates the ARK (Address Resolution Key) inserter for the given crypto layer.
 *
 * @param node The owning node.
 * @param crypto The crypto layer (darknet or opennet) whose ARK this maintains.
 * @param detector Used to detect our current IP/port.
 * @param enableARKs Whether ARK insertion is enabled at all.
 */
NodeARKInserter(Node node, NodeCrypto crypto, NodeIPPortDetector detector, boolean enableARKs) {
  this.node = node;
  this.crypto = crypto;
  this.detector = detector;
  this.enabled = enableARKs;
  logMINOR = Logger.shouldLog(Logger.MINOR, this);
  // Label log output according to which network this crypto layer belongs to.
  darknetOpennetString = crypto.isOpennet ? "Opennet" : "Darknet";
}
/**
 * Bulk failure callback: translates each item's error, removes all failed blocks from this
 * sub-segment, then reports fatal failures to the segment individually and non-fatal failures
 * as one batch.
 *
 * @param items One entry per failed block; each token is a MySendableRequestItem.
 */
public void onFailure(
    BulkCallFailureItem[] items, ObjectContainer container, ClientContext context) {
  FetchException[] fetchExceptions = new FetchException[items.length];
  int countFatal = 0;
  if (persistent) {
    container.activate(blockNums, 2);
  }
  // Translate errors, count fatal ones, and drop each failed block from our list.
  for (int i = 0; i < items.length; i++) {
    fetchExceptions[i] = translateException(items[i].e);
    if (fetchExceptions[i].isFatal()) countFatal++;
    removeBlockNum(((MySendableRequestItem) items[i].token).x, container, true);
  }
  if (persistent) {
    container.store(blockNums);
    container.deactivate(blockNums, 2);
    container.activate(segment, 1);
    container.activate(parent, 1);
    container.activate(segment.errors, 1);
  }
  if (parent.isCancelled()) {
    if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Failing: cancelled");
    // Fail the segment.
    segment.fail(new FetchException(FetchException.CANCELLED), container, context, false);
    // FIXME do we need to free the keyNum's??? Or will that happen later anyway?
    // NOTE(review): this early return skips the deactivate() calls at the end of the method,
    // leaving segment/parent/errors activated — confirm this is intended.
    return;
  }
  // Record every error mode in the segment's counters, fatal or not.
  for (int i = 0; i < fetchExceptions.length; i++) segment.errors.inc(fetchExceptions[i].getMode());
  int nonFatalExceptions = items.length - countFatal;
  int[] blockNumbers = new int[nonFatalExceptions];
  if (countFatal > 0) {
    FetchException[] newFetchExceptions = new FetchException[items.length - countFatal];
    // Call the fatal callbacks directly.
    int x = 0;
    for (int i = 0; i < items.length; i++) {
      int blockNum = ((MySendableRequestItem) items[i].token).x;
      if (fetchExceptions[i].isFatal()) {
        segment.onFatalFailure(fetchExceptions[i], blockNum, this, container, context);
      } else {
        // Compact the non-fatal failures into parallel arrays for the batch callback below.
        blockNumbers[x] = blockNum;
        newFetchExceptions[x] = fetchExceptions[i];
        x++;
      }
    }
    fetchExceptions = newFetchExceptions;
  } else {
    // No fatal failures: every item goes into the non-fatal batch.
    for (int i = 0; i < blockNumbers.length; i++)
      blockNumbers[i] = ((MySendableRequestItem) items[i].token).x;
  }
  segment.onNonFatalFailure(fetchExceptions, blockNumbers, this, container, context);
  if (persistent) {
    container.deactivate(segment, 1);
    container.deactivate(parent, 1);
    container.deactivate(segment.errors, 1);
  }
}
/**
 * Creates a sub-segment that fetches blocks of the given segment at a specific retry count.
 *
 * @param segment The parent segment supplying blocks and the fetch context.
 * @param parent The client requester; must not be null.
 * @param retryCount The retry level this sub-segment represents.
 * @throws NullPointerException If parent is null.
 */
SplitFileFetcherSubSegment(
    SplitFileFetcherSegment segment, ClientRequester parent, int retryCount) {
  super(parent);
  // Fail fast on a missing requester; the instance never escapes before this check.
  if (parent == null) throw new NullPointerException();
  this.segment = segment;
  this.retryCount = retryCount;
  this.ctx = segment.blockFetchContext;
  this.blockNums = new Vector<Integer>();
  this.logMINOR = Logger.shouldLog(Logger.MINOR, this);
}
/**
 * Creates a UDP socket handler bound to the given port and address.
 *
 * @param listenPort UDP port to bind.
 * @param bindto Local address to bind to.
 * @param node The owning node (supplies RNG, boot ID, run directory).
 * @param startupTime Node startup time, forwarded to the address tracker.
 * @param title Human-readable name for this socket (used in diagnostics).
 * @param collector Statistics collector for I/O accounting.
 * @throws SocketException If the socket cannot be bound.
 */
public UdpSocketHandler(
    int listenPort,
    InetAddress bindto,
    Node node,
    long startupTime,
    String title,
    IOStatisticCollector collector)
    throws SocketException {
  this.node = node;
  this.collector = collector;
  this.title = title;
  _bindTo = bindto;
  // Keep the Updater code in, just commented out, for now
  // We may want to be able to do on-line updates.
  // if (Updater.hasResource()) {
  //   _sock = (DatagramSocket) Updater.getResource();
  // } else {
  this.listenPort = listenPort;
  _sock = new DatagramSocket(listenPort, bindto);
  // Ensure the OS receive buffer is at least 64 KiB so packet bursts are not dropped.
  int sz = _sock.getReceiveBufferSize();
  if (sz < 65536) {
    _sock.setReceiveBufferSize(65536);
  }
  try {
    // Exit reasonably quickly
    _sock.setReuseAddress(true);
  } catch (SocketException e) {
    // Setting SO_REUSEADDR on an already-bound socket should not fail; treat it as fatal.
    throw new RuntimeException(e);
  }
  // }
  // Only used for debugging, no need to seed from Yarrow
  dropRandom = node.fastWeakRandom;
  logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
  logDEBUG = Logger.shouldLog(LogLevel.DEBUG, this);
  tracker = AddressTracker.create(node.lastBootID, node.runDir(), listenPort);
  tracker.startSend(startupTime);
}
/**
 * Re-queue a key for fetching once its cooldown period has expired.
 *
 * <p>Fix: the early return on cancellation previously skipped the matching
 * {@code container.deactivate(segment, 1)}, leaving the segment activated in the database;
 * the deactivate now runs on every exit path. The MINOR-log check is also evaluated once.
 *
 * @param key The key that has come out of cooldown.
 * @param time The time at which the cooldown expired.
 */
@Override
public void requeueAfterCooldown(
    Key key, long time, ObjectContainer container, ClientContext context) {
  if (persistent) {
    container.activate(segment, 1);
  }
  boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
  try {
    if (cancelled) {
      if (logMINOR) Logger.minor(this, "Not requeueing as already cancelled");
      return;
    }
    if (logMINOR) Logger.minor(this, "Requeueing after cooldown " + key + " for " + this);
    if (!segment.requeueAfterCooldown(key, time, container, context, this)) {
      Logger.error(
          this,
          "Key was not wanted after cooldown: "
              + key
              + " for "
              + this
              + " in requeueAfterCooldown");
    }
  } finally {
    // Always balance the activate() above, including on the cancelled early-return path.
    if (persistent) container.deactivate(segment, 1);
  }
}
/**
 * Creates a per-connection toadlet context bound to the given socket.
 *
 * @param sock The accepted client socket; its output stream is cached for replies.
 * @param headers Parsed request headers.
 * @param bf Factory for temporary data buckets.
 * @param pageMaker Renders HTML pages for this context.
 * @param container The owning toadlet container.
 * @throws IOException If the socket's output stream cannot be obtained.
 */
public ToadletContextImpl(
    Socket sock,
    MultiValueTable<String, String> headers,
    BucketFactory bf,
    PageMaker pageMaker,
    ToadletContainer container)
    throws IOException {
  this.headers = headers;
  this.bf = bf;
  this.pagemaker = pageMaker;
  this.container = container;
  this.closed = false;
  sockOutputStream = sock.getOutputStream();
  remoteAddr = sock.getInetAddress();
  if (Logger.shouldLog(Logger.DEBUG, this)) {
    Logger.debug(this, "Connection from " + remoteAddr);
  }
}
/**
 * Pick a random still-wanted block number from this sub-segment for fetching.
 *
 * <p>Makes up to 10 random attempts; a pick is skipped if the block's node key is unavailable
 * or the key is already being fetched (per KeysFetchingLocally).
 *
 * @return A request item wrapping the chosen block number, or null if nothing is fetchable.
 */
private SendableRequestItem getRandomBlockNum(
    KeysFetchingLocally keys, ClientContext context, ObjectContainer container) {
  if (persistent) {
    container.activate(this, 1);
    container.activate(blockNums, 1);
    container.activate(segment, 1);
  }
  logMINOR = Logger.shouldLog(Logger.MINOR, this);
  synchronized (segment) {
    if (blockNums.isEmpty()) {
      if (logMINOR) Logger.minor(this, "No blocks to remove");
      return null;
    }
    // Bounded retry loop: try up to 10 random picks before giving up for this round.
    for (int i = 0; i < 10; i++) {
      Integer ret;
      int x;
      if (blockNums.size() == 0) return null;
      x = context.random.nextInt(blockNums.size());
      ret = blockNums.get(x);
      int num = ret;
      Key key = segment.getBlockNodeKey(num, container);
      if (key == null) {
        // No key: either the segment is winding down, we already have the block,
        // or the block entry is inconsistent.
        if (segment.isFinishing(container) || segment.isFinished(container)) return null;
        if (segment.haveBlock(num, container))
          Logger.error(this, "Already have block " + ret + " but was in blockNums on " + this);
        else Logger.error(this, "Key is null for block " + ret + " for " + this);
        continue;
      }
      if (keys.hasKey(key)) {
        // Already being fetched elsewhere; pick another block.
        continue;
      }
      if (logMINOR)
        // NOTE(review): "blockNums.size() + 1" looks like an off-by-one in this log
        // message (cosmetic only) — confirm the intended total.
        Logger.minor(
            this,
            "Removing block "
                + x
                + " of "
                + (blockNums.size() + 1)
                + " : "
                + ret
                + " on "
                + this);
      return new MySendableRequestItem(num);
    }
    return null;
  }
}
public void unregister(ObjectContainer container, ClientContext context, short oldPrio) { RandomGrabArray arr = getParentGrabArray(); if (arr != null) { if (persistent) container.activate(arr, 1); arr.remove(this, container); } else { // Should this be a higher priority? if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor( this, "Cannot unregister " + this + " : not registered", new Exception("debug")); } ClientRequester cr = getClientRequest(); if (persistent) container.activate(cr, 1); getScheduler(context).removeFromAllRequestsByClientRequest(cr, this, true, container); // FIXME should we deactivate?? // if(persistent) container.deactivate(cr, 1); }
/**
 * Add a single block number to this sub-segment.
 *
 * @param blockNo The block number to add; must be non-negative.
 * @param dontComplainOnDupes If true, duplicate adds are logged at MINOR instead of ERROR.
 * @return True if the caller should schedule.
 * @throws IllegalArgumentException If blockNo is negative.
 * @throws IllegalStateException If this sub-segment has already been cancelled.
 */
public boolean add(
    int blockNo, ObjectContainer container, ClientContext context, boolean dontComplainOnDupes) {
  if (persistent) {
    // container.activate(segment, 1);
    container.activate(blockNums, 1);
  }
  boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
  if (logMINOR) Logger.minor(this, "Adding block " + blockNo + " to " + this);
  if (blockNo < 0) throw new IllegalArgumentException();
  Integer i = Integer.valueOf(blockNo);
  boolean schedule = true;
  synchronized (segment) {
    if (cancelled)
      throw new IllegalStateException("Adding block " + blockNo + " to already cancelled " + this);
    if (blockNums.contains(i)) {
      // Duplicate add: complain (or just note it) but don't add twice.
      if (!dontComplainOnDupes) Logger.error(this, "Block numbers already contain block " + blockNo);
      else if (logMINOR) Logger.minor(this, "Block numbers already contain block " + blockNo);
    } else {
      blockNums.add(i);
    }
    /**
     * Race condition:
     *
     * <p>Starter thread sees there is only one block on us, so removes us. Another thread adds a
     * block. We don't schedule as we now have two blocks. Starter thread removes us. Other blocks
     * may be added later, but we are never rescheduled.
     *
     * <p>Fixing this by only removing the SendableRequest after we've removed the block is
     * nontrivial with the current code. So what we do here is simply check whether we are
     * registered, instead of checking whether blockNums.size() > 1 as we used to.
     */
    if (schedule && getParentGrabArray() != null) {
      if (logMINOR)
        Logger.minor(
            this, "Already registered, not scheduling: " + blockNums.size() + " : " + blockNums);
      schedule = false;
    }
  }
  if (persistent) container.store(blockNums);
  return schedule;
}
/**
 * Create an ArchiveManager.
 *
 * @param maxHandlers The maximum number of cached ArchiveHandlers, i.e. the maximum number of
 *     containers to track.
 * @param maxCachedData The maximum size of the cache directory, in bytes.
 * @param maxArchivedFileSize The maximum extracted size of a single file in any archive.
 * @param maxCachedElements The maximum number of cached elements (an element is a file
 *     extracted from an archive; it is stored, encrypted and padded, in a single file).
 * @param tempBucketFactory Factory used to create temporary storage buckets.
 */
public ArchiveManager(
    int maxHandlers,
    long maxCachedData,
    long maxArchivedFileSize,
    int maxCachedElements,
    BucketFactory tempBucketFactory) {
  this.maxCachedElements = maxCachedElements;
  this.maxCachedData = maxCachedData;
  this.maxArchivedFileSize = maxArchivedFileSize;
  this.tempBucketFactory = tempBucketFactory;
  maxArchiveHandlers = maxHandlers;
  // FIXME PERFORMANCE Assuming little key locality, FAST_COMPARATOR should be quicker.
  // This may not hold if many sites with many containers are all inserted as individual SSKs.
  archiveHandlers = LRUMap.createSafeMap(FreenetURI.FAST_COMPARATOR);
  storedData = new LRUMap<ArchiveKey, ArchiveStoreItem>();
  logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
}
/**
 * Called when our detected IP/peer situation may have changed. Broadcasts the new physical.udp
 * addresses to connected peers via a differential node reference, then starts an ARK insert —
 * or queues one (shouldInsert) if an insert is already running or no peers are connected yet.
 */
public void update() {
  logMINOR = Logger.shouldLog(Logger.MINOR, this);
  if (logMINOR) Logger.minor(this, "update()");
  // Nothing to do unless the detected IP actually changed.
  if (!checkIPUpdated()) return;
  // We'll broadcast the new physical.udp entry to our connected peers via a differential node
  // reference
  // We'll err on the side of caution and not update our peer to an empty physical.udp entry
  // using a differential node reference
  SimpleFieldSet nfs = crypto.exportPublicFieldSet(false, false, true);
  String[] entries = nfs.getAll("physical.udp");
  if (entries != null) {
    SimpleFieldSet fs = new SimpleFieldSet(true);
    fs.putOverwrite("physical.udp", entries);
    if (logMINOR)
      Logger.minor(this, darknetOpennetString + " ref's physical.udp is '" + fs.toString() + "'");
    node.peers.locallyBroadcastDiffNodeRef(fs, !crypto.isOpennet, crypto.isOpennet);
  } else {
    if (logMINOR) Logger.minor(this, darknetOpennetString + " ref's physical.udp is null");
  }
  // Proceed with inserting the ARK
  if (logMINOR)
    Logger.minor(this, "Inserting " + darknetOpennetString + " ARK because peers list changed");
  if (inserter != null) {
    // Already inserting.
    // Re-insert after finished.
    synchronized (this) {
      shouldInsert = true;
    }
    return;
  }
  // Otherwise need to start an insert
  if (node.noConnectedPeers()) {
    // Can't start an insert yet
    synchronized (this) {
      shouldInsert = true;
    }
    return;
  }
  startInserter();
}
/**
 * Client callback: the fetch failed. Records the failure message, notifies the FCP client, and
 * finishes this request. No-op if the request has already finished.
 *
 * @param e The failure to report.
 * @param state The getter that failed (unused here).
 */
public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
  if (finished) return;
  synchronized (this) {
    // Record terminal state atomically before notifying anyone.
    succeeded = false;
    getFailedMessage = new GetFailedMessage(e, identifier, global);
    finished = true;
    started = true;
  }
  if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Caught " + e, e);
  trySendDataFoundOrGetFailed(null, container);
  if (persistenceType == PERSIST_FOREVER) {
    container.activate(client, 1);
  }
  // We do not want the data to be removed on failure, because the request
  // may be restarted, and the bucket persists on the getter, even if we get rid of it here.
  // freeData(container);
  finish(container);
  if (client != null) client.notifyFailure(this, container);
  if (persistenceType == PERSIST_FOREVER) container.store(this);
}
public void addAll( int[] blocks, ObjectContainer container, ClientContext context, boolean dontComplainOnDupes) { if (persistent) { // container.activate(segment, 1); container.activate(blockNums, 1); } boolean logMINOR = Logger.shouldLog(Logger.MINOR, this); if (logMINOR) Logger.minor(this, "Adding " + blocks + " blocks to " + this); synchronized (segment) { if (cancelled) throw new IllegalStateException("Adding blocks to already cancelled " + this); for (int x = 0; x < blocks.length; x++) { int i = blocks[x]; Integer ii = Integer.valueOf(i); if (blockNums.contains(ii)) { if (!dontComplainOnDupes) Logger.error(this, "Block numbers already contain block " + i); else if (logMINOR) Logger.minor(this, "Block numbers already contain block " + i); } else { blockNums.add(ii); } } } if (persistent) container.store(blockNums); }
/** Handle an incoming connection. Blocking, obviously. */
public static void handle(Socket sock, ToadletContainer container, PageMaker pageMaker) {
  try {
    InputStream is = new BufferedInputStream(sock.getInputStream(), 4096);
    LineReadingInputStream lis = new LineReadingInputStream(is);
    // Keep-alive loop: each iteration parses and dispatches one HTTP request.
    while (true) {
      String firstLine = lis.readLine(32768, 128, false); // ISO-8859-1 or US-ASCII, _not_ UTF-8
      if (firstLine == null) {
        // Peer closed the connection.
        sock.close();
        return;
      } else if (firstLine.equals("")) {
        // Tolerate blank lines between requests.
        continue;
      }
      boolean logMINOR = Logger.shouldLog(Logger.MINOR, ToadletContextImpl.class);
      if (logMINOR) Logger.minor(ToadletContextImpl.class, "first line: " + firstLine);
      // Request line: METHOD SP URI SP VERSION
      String[] split = firstLine.split(" ");
      if (split.length != 3)
        throw new ParseException(
            "Could not parse request line (split.length=" + split.length + "): " + firstLine);
      if (!split[2].startsWith("HTTP/1."))
        throw new ParseException("Unrecognized protocol " + split[2]);
      URI uri;
      try {
        uri = URIPreEncoder.encodeURI(split[1]).normalize();
        if (logMINOR)
          Logger.minor(
              ToadletContextImpl.class,
              "URI: "
                  + uri
                  + " path "
                  + uri.getPath()
                  + " host "
                  + uri.getHost()
                  + " frag "
                  + uri.getFragment()
                  + " port "
                  + uri.getPort()
                  + " query "
                  + uri.getQuery()
                  + " scheme "
                  + uri.getScheme());
      } catch (URISyntaxException e) {
        sendURIParseError(sock.getOutputStream(), true, e);
        return;
      }
      String method = split[0];
      // Parse headers into "name: value" pairs; names are lower-cased.
      MultiValueTable<String, String> headers = new MultiValueTable<String, String>();
      while (true) {
        String line = lis.readLine(32768, 128, false); // ISO-8859 or US-ASCII, not UTF-8
        if (line == null) {
          sock.close();
          return;
        }
        // System.out.println("Length="+line.length()+": "+line);
        if (line.length() == 0) break; // blank line terminates the header block
        int index = line.indexOf(':');
        if (index < 0) {
          throw new ParseException("Missing ':' in request header field");
        }
        String before = line.substring(0, index).toLowerCase();
        String after = line.substring(index + 1);
        after = after.trim();
        headers.put(before, after);
      }
      boolean disconnect =
          shouldDisconnectAfterHandled(split[2].equals("HTTP/1.0"), headers)
              || !container.enablePersistentConnections();
      boolean allowPost = container.allowPosts();
      BucketFactory bf = container.getBucketFactory();
      ToadletContextImpl ctx = new ToadletContextImpl(sock, headers, bf, pageMaker, container);
      ctx.shouldDisconnect = disconnect;
      /*
       * copy the data into a bucket now,
       * before we go into the redirect loop
       */
      Bucket data;
      boolean methodIsConfigurable = true;
      String slen = headers.get("content-length");
      // Enforce per-method body requirements before reading any body.
      if (METHODS_MUST_HAVE_DATA.contains(method)) {
        // <method> must have data
        methodIsConfigurable = false;
        if (slen == null) {
          ctx.shouldDisconnect = true;
          ctx.sendReplyHeaders(400, "Bad Request", null, null, -1);
          return;
        }
      } else if (METHODS_CANNOT_HAVE_DATA.contains(method)) {
        // <method> can not have data
        methodIsConfigurable = false;
        if (slen != null) {
          ctx.shouldDisconnect = true;
          ctx.sendReplyHeaders(400, "Bad Request", null, null, -1);
          return;
        }
      }
      if (slen != null) {
        long len;
        try {
          len = Integer.parseInt(slen);
          if (len < 0) throw new NumberFormatException("content-length less than 0");
        } catch (NumberFormatException e) {
          ctx.shouldDisconnect = true;
          ctx.sendReplyHeaders(400, "Bad Request", null, null, -1);
          return;
        }
        if (allowPost && ((!container.publicGatewayMode()) || ctx.isAllowedFullAccess())) {
          // Buffer the full request body before dispatch (needed for the redirect loop).
          data = bf.makeBucket(len);
          BucketTools.copyFrom(data, is, len);
        } else {
          // Not allowed to accept a body: drain it and refuse.
          FileUtil.skipFully(is, len);
          if (method.equals("POST")) {
            ctx.sendMethodNotAllowed("POST", true);
          } else {
            sendError(
                sock.getOutputStream(),
                403,
                "Forbidden",
                "Content not allowed in this configuration",
                true,
                null);
          }
          ctx.close();
          return;
        }
      } else {
        // we're not doing to use it, but we have to keep
        // the compiler happy
        data = null;
      }
      // In restricted mode only a small whitelist of methods is allowed.
      if (!container.enableExtendedMethodHandling()) {
        if (!METHODS_RESTRICTED_MODE.contains(method)) {
          sendError(
              sock.getOutputStream(),
              403,
              "Forbidden",
              "Method not allowed in this configuration",
              true,
              null);
          return;
        }
      }
      // Handle it.
      try {
        boolean redirect = true;
        // Redirect loop: a toadlet may throw RedirectException to re-dispatch to a new URI.
        while (redirect) {
          // don't go around the loop unless set explicitly
          redirect = false;
          Toadlet t;
          try {
            t = container.findToadlet(uri);
          } catch (PermanentRedirectException e) {
            Toadlet.writePermanentRedirect(ctx, "Found elsewhere", e.newuri.toASCIIString());
            break;
          }
          if (t == null) {
            ctx.sendNoToadletError(ctx.shouldDisconnect);
            break;
          }
          // if the Toadlet does not support the method, we don't need to parse the data
          // also due this pre check a 'NoSuchMethodException' should never appear
          if (!(t.findSupportedMethods().contains(method))) {
            ctx.sendMethodNotAllowed(method, ctx.shouldDisconnect);
            break;
          }
          HTTPRequestImpl req = new HTTPRequestImpl(uri, data, ctx, method);
          try {
            // Dispatch reflectively to handleMethod<METHOD> on the toadlet.
            String methodName = "handleMethod" + method;
            try {
              Class<? extends Toadlet> c = t.getClass();
              Method m = c.getMethod(methodName, HANDLE_PARAMETERS);
              if (methodIsConfigurable) {
                // The @AllowData annotation declares whether a body is forbidden,
                // optional (annotation present, value false) or required (value true).
                AllowData anno = m.getAnnotation(AllowData.class);
                if (anno == null) {
                  if (data != null) {
                    sendError(
                        sock.getOutputStream(),
                        400,
                        "Bad Request",
                        "Content not allowed",
                        true,
                        null);
                    ctx.close();
                    return;
                  }
                } else if (anno.value()) {
                  if (data == null) {
                    sendError(
                        sock.getOutputStream(),
                        400,
                        "Bad Request",
                        "Missing Content",
                        true,
                        null);
                    ctx.close();
                    return;
                  }
                }
              }
              ctx.setActiveToadlet(t);
              Object arglist[] = new Object[] {uri, req, ctx};
              m.invoke(t, arglist);
            } catch (InvocationTargetException ite) {
              // Unwrap so the outer catches see the toadlet's real exception.
              throw ite.getCause();
            }
          } catch (RedirectException re) {
            uri = re.newuri;
            redirect = true;
          } finally {
            req.freeParts();
          }
        }
        if (ctx.shouldDisconnect) {
          sock.close();
          return;
        }
      } finally {
        if (data != null) data.free();
      }
    }
  } catch (ParseException e) {
    try {
      sendError(
          sock.getOutputStream(),
          400,
          "Bad Request",
          l10n("parseErrorWithError", "error", e.getMessage()),
          true,
          null);
    } catch (IOException e1) {
      // Ignore
    }
  } catch (TooLongException e) {
    try {
      sendError(sock.getOutputStream(), 400, "Bad Request", l10n("headersLineTooLong"), true, null);
    } catch (IOException e1) {
      // Ignore
    }
  } catch (IOException e) {
    // ignore and return
  } catch (ToadletContextClosedException e) {
    Logger.error(
        ToadletContextImpl.class, "ToadletContextClosedException while handling connection!");
  } catch (Throwable t) {
    Logger.error(ToadletContextImpl.class, "Caught error: " + t + " handling socket", t);
    try {
      sendError(sock.getOutputStream(), 500, "Internal Error", t.toString(), true, null);
    } catch (IOException e1) {
      // ignore and return
    }
  }
}
/** Handle an incoming connection. Blocking, obviously. */
public static void handle(Socket sock, ToadletContainer container, PageMaker pageMaker) {
  try {
    InputStream is = new BufferedInputStream(sock.getInputStream(), 4096);
    LineReadingInputStream lis = new LineReadingInputStream(is);
    // Keep-alive loop: each iteration parses and dispatches one HTTP request.
    while (true) {
      String firstLine = lis.readLine(32768, 128, false); // ISO-8859-1 or US-ASCII, _not_ UTF-8
      if (firstLine == null) {
        // Peer closed the connection.
        sock.close();
        return;
      } else if (firstLine.equals("")) {
        // Tolerate blank lines between requests.
        continue;
      }
      boolean logMINOR = Logger.shouldLog(Logger.MINOR, ToadletContextImpl.class);
      if (logMINOR) Logger.minor(ToadletContextImpl.class, "first line: " + firstLine);
      // Request line: METHOD SP URI SP VERSION
      String[] split = firstLine.split(" ");
      if (split.length != 3)
        throw new ParseException(
            "Could not parse request line (split.length=" + split.length + "): " + firstLine);
      if (!split[2].startsWith("HTTP/1."))
        throw new ParseException("Unrecognized protocol " + split[2]);
      URI uri;
      try {
        uri = URIPreEncoder.encodeURI(split[1]).normalize();
        if (logMINOR)
          Logger.minor(
              ToadletContextImpl.class,
              "URI: "
                  + uri
                  + " path "
                  + uri.getPath()
                  + " host "
                  + uri.getHost()
                  + " frag "
                  + uri.getFragment()
                  + " port "
                  + uri.getPort()
                  + " query "
                  + uri.getQuery()
                  + " scheme "
                  + uri.getScheme());
      } catch (URISyntaxException e) {
        sendURIParseError(sock.getOutputStream(), true, e);
        return;
      }
      String method = split[0];
      // Parse headers into "name: value" pairs; names are lower-cased.
      MultiValueTable<String, String> headers = new MultiValueTable<String, String>();
      while (true) {
        String line = lis.readLine(32768, 128, false); // ISO-8859 or US-ASCII, not UTF-8
        if (line == null) {
          sock.close();
          return;
        }
        // System.out.println("Length="+line.length()+": "+line);
        if (line.length() == 0) break; // blank line terminates the header block
        int index = line.indexOf(':');
        if (index < 0) {
          throw new ParseException("Missing ':' in request header field");
        }
        String before = line.substring(0, index).toLowerCase();
        String after = line.substring(index + 1);
        after = after.trim();
        headers.put(before, after);
      }
      boolean disconnect =
          shouldDisconnectAfterHandled(split[2].equals("HTTP/1.0"), headers)
              || !container.enablePersistentConnections();
      boolean allowPost = container.allowPosts();
      BucketFactory bf = container.getBucketFactory();
      ToadletContextImpl ctx = new ToadletContextImpl(sock, headers, bf, pageMaker, container);
      ctx.shouldDisconnect = disconnect;
      /*
       * if we're handling a POST, copy the data into a bucket now,
       * before we go into the redirect loop
       */
      Bucket data;
      if (method.equals("POST")) {
        String slen = headers.get("content-length");
        if (slen == null) {
          sendError(
              sock.getOutputStream(), 400, "Bad Request", l10n("noContentLengthInPOST"), true, null);
          return;
        }
        long len;
        try {
          len = Integer.parseInt(slen);
          if (len < 0) throw new NumberFormatException("content-length less than 0");
        } catch (NumberFormatException e) {
          sendError(
              sock.getOutputStream(),
              400,
              "Bad Request",
              l10n("cannotParseContentLengthWithError", "error", e.toString()),
              true,
              null);
          return;
        }
        if (allowPost && ((!container.publicGatewayMode()) || ctx.isAllowedFullAccess())) {
          // Buffer the full POST body before dispatch (needed for the redirect loop).
          data = bf.makeBucket(len);
          BucketTools.copyFrom(data, is, len);
        } else {
          // POSTs not permitted for this client: drain the body and refuse.
          FileUtil.skipFully(is, len);
          ctx.sendMethodNotAllowed("POST", true);
          ctx.close();
          return;
        }
      } else {
        // we're not doing to use it, but we have to keep
        // the compiler happy
        data = null;
      }
      // Handle it.
      try {
        boolean redirect = true;
        // Redirect loop: a toadlet may throw RedirectException to re-dispatch to a new URI.
        while (redirect) {
          // don't go around the loop unless set explicitly
          redirect = false;
          Toadlet t;
          try {
            t = container.findToadlet(uri);
          } catch (PermanentRedirectException e) {
            Toadlet.writePermanentRedirect(ctx, "Found elsewhere", e.newuri.toASCIIString());
            break;
          }
          if (t == null) {
            ctx.sendNoToadletError(ctx.shouldDisconnect);
            break;
          }
          HTTPRequestImpl req = new HTTPRequestImpl(uri, data, ctx, method);
          try {
            // Only GET and POST are dispatched; everything else is refused.
            if (method.equals("GET")) {
              ctx.setActiveToadlet(t);
              t.handleGet(uri, req, ctx);
              ctx.close();
            } else if (method.equals("POST")) {
              ctx.setActiveToadlet(t);
              t.handlePost(uri, req, ctx);
            } else {
              ctx.sendMethodNotAllowed(method, ctx.shouldDisconnect);
              ctx.close();
            }
          } catch (RedirectException re) {
            uri = re.newuri;
            redirect = true;
          } finally {
            req.freeParts();
          }
        }
        if (ctx.shouldDisconnect) {
          sock.close();
          return;
        }
      } finally {
        if (data != null) data.free();
      }
    }
  } catch (ParseException e) {
    try {
      sendError(
          sock.getOutputStream(),
          400,
          "Bad Request",
          l10n("parseErrorWithError", "error", e.getMessage()),
          true,
          null);
    } catch (IOException e1) {
      // Ignore
    }
  } catch (TooLongException e) {
    try {
      sendError(sock.getOutputStream(), 400, "Bad Request", l10n("headersLineTooLong"), true, null);
    } catch (IOException e1) {
      // Ignore
    }
  } catch (IOException e) {
    // ignore and return
  } catch (ToadletContextClosedException e) {
    Logger.error(
        ToadletContextImpl.class, "ToadletContextClosedException while handling connection!");
  } catch (Throwable t) {
    Logger.error(ToadletContextImpl.class, "Caught error: " + t + " handling socket", t);
    try {
      sendError(sock.getOutputStream(), 500, "Internal Error", t.toString(), true, null);
    } catch (IOException e1) {
      // ignore and return
    }
  }
}
/**
 * Extract data to cache. Call synchronized on ctx.
 *
 * @param key The key the data was fetched from.
 * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
 * @param ctype The compression codec the archive container was compressed with, or null.
 * @param data The actual data fetched.
 * @param archiveContext The context for the whole fetch process.
 * @param ctx The ArchiveStoreContext for this key.
 * @param element A particular element that the caller is especially interested in, or null.
 * @param callback A callback to be called if we find that element, or if we don't.
 * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
 * @throws ArchiveRestartException If the request needs to be restarted because the archive
 *     changed.
 */
public void extractToCache(
    FreenetURI key,
    ARCHIVE_TYPE archiveType,
    COMPRESSOR_TYPE ctype,
    final Bucket data,
    ArchiveContext archiveContext,
    ArchiveStoreContext ctx,
    String element,
    ArchiveExtractCallback callback,
    ClientContext context)
    throws ArchiveFailureException, ArchiveRestartException {
  logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
  MutableBoolean gotElement = element != null ? new MutableBoolean() : null;
  if (logMINOR) Logger.minor(this, "Extracting " + key);
  ctx.removeAllCachedItems(this); // flush cache anyway
  final long expectedSize = ctx.getLastSize();
  final long archiveSize = data.size();
  /**
   * Set if we need to throw a RestartedException rather than returning success, after we have
   * unpacked everything.
   */
  boolean throwAtExit = false;
  // Size changed since last time we saw this archive: content changed, restart later.
  if ((expectedSize != -1) && (archiveSize != expectedSize)) {
    throwAtExit = true;
    ctx.setLastSize(archiveSize);
  }
  byte[] expectedHash = ctx.getLastHash();
  if (expectedHash != null) {
    byte[] realHash;
    try {
      realHash = BucketTools.hash(data);
    } catch (IOException e) {
      throw new ArchiveFailureException("Error reading archive data: " + e, e);
    }
    // Hash changed: same restart-at-exit handling as a size change.
    if (!Arrays.equals(realHash, expectedHash)) throwAtExit = true;
    ctx.setLastHash(realHash);
  }
  if (archiveSize > archiveContext.maxArchiveSize)
    throw new ArchiveFailureException(
        "Archive too big (" + archiveSize + " > " + archiveContext.maxArchiveSize + ")!");
  else if (archiveSize <= 0)
    throw new ArchiveFailureException("Archive too small! (" + archiveSize + ')');
  else if (logMINOR)
    Logger.minor(this, "Container size (possibly compressed): " + archiveSize + " for " + data);
  InputStream is = null;
  try {
    // wrapper carries any exception thrown by the background LZMA_NEW decompressor thread.
    final ExceptionWrapper wrapper;
    if ((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
      // NOTE(review): ZIP archives are read uncompressed regardless of ctype — confirm that
      // ZIP containers are never additionally compressed.
      if (logMINOR) Logger.minor(this, "No compression");
      is = data.getInputStream();
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.BZIP2) {
      if (logMINOR) Logger.minor(this, "dealing with BZIP2");
      is = new BZip2CompressorInputStream(data.getInputStream());
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.GZIP) {
      if (logMINOR) Logger.minor(this, "dealing with GZIP");
      is = new GZIPInputStream(data.getInputStream());
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.LZMA_NEW) {
      // LZMA internally uses pipe streams, so we may as well do it here.
      // In fact we need to for LZMA_NEW, because of the properties bytes.
      PipedInputStream pis = new PipedInputStream();
      PipedOutputStream pos = new PipedOutputStream();
      pis.connect(pos);
      final OutputStream os = new BufferedOutputStream(pos);
      wrapper = new ExceptionWrapper();
      // Decompress on a separate thread into the pipe; any failure is captured in wrapper
      // and rethrown after extraction below.
      context.mainExecutor.execute(
          new Runnable() {
            @Override
            public void run() {
              InputStream is = null;
              try {
                Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(
                    is = data.getInputStream(), os, data.size(), expectedSize);
              } catch (CompressionOutputSizeException e) {
                Logger.error(this, "Failed to decompress archive: " + e, e);
                wrapper.set(e);
              } catch (IOException e) {
                Logger.error(this, "Failed to decompress archive: " + e, e);
                wrapper.set(e);
              } finally {
                try {
                  // Must close the pipe so the reading side sees EOF.
                  os.close();
                } catch (IOException e) {
                  Logger.error(this, "Failed to close PipedOutputStream: " + e, e);
                }
                Closer.close(is);
              }
            }
          });
      is = pis;
    } else if (ctype == COMPRESSOR_TYPE.LZMA) {
      if (logMINOR) Logger.minor(this, "dealing with LZMA");
      is = new LzmaInputStream(data.getInputStream());
      wrapper = null;
    } else {
      wrapper = null;
    }
    if (ARCHIVE_TYPE.ZIP == archiveType)
      handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
    else if (ARCHIVE_TYPE.TAR == archiveType)
      handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
    else
      throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
    if (wrapper != null) {
      // Surface any failure from the background decompressor thread.
      Exception e = wrapper.get();
      if (e != null)
        throw new ArchiveFailureException(
            "An exception occured decompressing: " + e.getMessage(), e);
    }
  } catch (IOException ioe) {
    throw new ArchiveFailureException("An IOE occured: " + ioe.getMessage(), ioe);
  } finally {
    Closer.close(is);
  }
}
/** Logs the given message at DEBUG level, if DEBUG logging is enabled for this class. */
@Override
void log(String s) {
  if (!Logger.shouldLog(Logger.DEBUG, this)) return;
  Logger.debug(this, s);
}
public void objectOnDelete(ObjectContainer container) {
  // db4o delete callback: record the deletion at DEBUG level, attaching a
  // throwaway exception so the log carries a stack trace of the call site.
  boolean debugEnabled = Logger.shouldLog(Logger.DEBUG, this);
  if (debugEnabled) {
    Logger.debug(this, "Deleting URI", new Exception("debug"));
  }
}
public void handleMethodPOST(URI uri, HTTPRequest request, ToadletContext ctx) throws ToadletContextClosedException, IOException, RedirectException { if (!ctx.checkFullAccess(this)) return; // User requested reset to defaults, so present confirmation page. if (request.isPartSet("confirm-reset-to-defaults")) { PageNode page = ctx.getPageMaker().getPageNode(l10n("confirmResetTitle"), ctx); HTMLNode pageNode = page.outer; HTMLNode contentNode = page.content; HTMLNode content = ctx.getPageMaker() .getInfobox( "infobox-warning", l10n("confirmResetTitle"), contentNode, "reset-confirm", true); content.addChild("#", l10n("confirmReset")); HTMLNode formNode = ctx.addFormChild(content, path(), "yes-button"); String subconfig = request.getPartAsStringFailsafe("subconfig", MAX_PARAM_VALUE_SIZE); formNode.addChild( "input", new String[] {"type", "name", "value"}, new String[] {"hidden", "subconfig", subconfig}); // Persist visible fields so that they are reset to default or // unsaved changes are persisted. for (String part : request.getParts()) { if (part.startsWith(subconfig)) { formNode.addChild( "input", new String[] {"type", "name", "value"}, new String[] { "hidden", part, request.getPartAsStringFailsafe(part, MAX_PARAM_VALUE_SIZE) }); } } formNode.addChild( "input", new String[] {"type", "name", "value"}, new String[] { "submit", "reset-to-defaults", NodeL10n.getBase().getString("Toadlet.yes") }); formNode.addChild( "input", new String[] {"type", "name", "value"}, new String[] { "submit", "decline-default-reset", NodeL10n.getBase().getString("Toadlet.no") }); writeHTMLReply(ctx, 200, "OK", pageNode.generate()); return; } // Returning from directory selector with a selection or declining // resetting settings to defaults. // Re-render config page with any changes made in the selector and/or // persisting values changed but // not applied. 
if (request.isPartSet(LocalFileBrowserToadlet.selectDir) || request.isPartSet("decline-default-reset")) { handleMethodGET(uri, request, ctx); return; } // Entering directory selector from config page. // This would be two loops if it checked for a redirect // (key.startsWith("select-directory.")) before // constructing params string. It always constructs it, then redirects // if it turns out to be needed. boolean directorySelector = false; StringBuilder paramsBuilder = new StringBuilder(); paramsBuilder.append('?'); String value; for (String key : request.getParts()) { // Prepare parts for page selection redirect: // Extract option and put into "select-for"; preserve others. value = request.getPartAsStringFailsafe(key, MAX_PARAM_VALUE_SIZE); if (key.startsWith("select-directory.")) { paramsBuilder .append("select-for=") .append(URLEncoder.encode(key.substring("select-directory.".length()), true)) .append('&'); directorySelector = true; } else { paramsBuilder .append(URLEncoder.encode(key, true)) .append('=') .append(URLEncoder.encode(value, true)) .append('&'); } } String params = paramsBuilder.toString(); if (directorySelector) { MultiValueTable<String, String> headers = new MultiValueTable<String, String>(1); // params ends in &. Download directory browser starts in default // download directory. 
headers.put( "Location", directoryBrowserPath + params + "path=" + core.getDownloadsDir().getAbsolutePath()); ctx.sendReplyHeaders(302, "Found", headers, null, 0); return; } StringBuilder errbuf = new StringBuilder(); boolean logMINOR = Logger.shouldLog(LogLevel.MINOR, this); String prefix = request.getPartAsStringFailsafe("subconfig", MAX_PARAM_VALUE_SIZE); if (logMINOR) { Logger.minor(this, "Current config prefix is " + prefix); } boolean resetToDefault = request.isPartSet("reset-to-defaults"); if (resetToDefault && logMINOR) { Logger.minor(this, "Resetting to defaults"); } for (Option<?> o : config.get(prefix).getOptions()) { String configName = o.getName(); if (logMINOR) { Logger.minor(this, "Checking option " + prefix + '.' + configName); } // This ignores unrecognized parameters. if (request.isPartSet(prefix + '.' + configName)) { // Current subconfig is to be reset to default. if (resetToDefault) { // Disallow resetting fproxy port number to default as it // might break the link to start fproxy on the system tray, // shortcuts etc. if (prefix.equals("fproxy") && configName.equals("port")) continue; value = o.getDefault(); } else { value = request.getPartAsStringFailsafe(prefix + '.' + configName, MAX_PARAM_VALUE_SIZE); } if (!(o.getValueDisplayString().equals(value))) { if (logMINOR) { Logger.minor(this, "Changing " + prefix + '.' + configName + " to " + value); } try { o.setValue(value); } catch (InvalidConfigValueException e) { errbuf.append(o.getName()).append(' ').append(e.getMessage()).append('\n'); } catch (NodeNeedRestartException e) { needRestart = true; } catch (Exception e) { errbuf.append(o.getName()).append(' ').append(e).append('\n'); Logger.error(this, "Caught " + e, e); } } else if (logMINOR) { Logger.minor(this, prefix + '.' 
+ configName + " not changed"); } } } // Wrapper params String wrapperConfigName = "wrapper.java.maxmemory"; if (request.isPartSet(wrapperConfigName)) { value = request.getPartAsStringFailsafe(wrapperConfigName, MAX_PARAM_VALUE_SIZE); if (!WrapperConfig.getWrapperProperty(wrapperConfigName).equals(value)) { if (logMINOR) { Logger.minor(this, "Setting " + wrapperConfigName + " to " + value); } WrapperConfig.setWrapperProperty(wrapperConfigName, value); } } config.store(); PageNode page = ctx.getPageMaker().getPageNode(l10n("appliedTitle"), ctx); HTMLNode pageNode = page.outer; HTMLNode contentNode = page.content; if (errbuf.length() == 0) { HTMLNode content = ctx.getPageMaker() .getInfobox( "infobox-success", l10n("appliedTitle"), contentNode, "configuration-applied", true); content.addChild("#", l10n("appliedSuccess")); if (needRestart) { content.addChild("br"); content.addChild("#", l10n("needRestart")); if (node.isUsingWrapper()) { content.addChild("br"); HTMLNode restartForm = ctx.addFormChild(content, "/", "restartForm"); restartForm.addChild( "input", // new String[] {"type", "name"}, // new String[] {"hidden", "restart"}); restartForm.addChild( "input", // new String[] {"type", "name", "value"}, // new String[] { "submit", "restart2", // l10n("restartNode") }); } if (needRestartUserAlert == null) { needRestartUserAlert = new NeedRestartUserAlert(ctx.getFormPassword()); ctx.getAlertManager().register(needRestartUserAlert); } } } else { HTMLNode content = ctx.getPageMaker() .getInfobox( "infobox-error", l10n("appliedFailureTitle"), contentNode, "configuration-error", true) .addChild("div", "class", "infobox-content"); content.addChild("#", l10n("appliedFailureExceptions")); content.addChild("br"); content.addChild("#", errbuf.toString()); } HTMLNode content = ctx.getPageMaker() .getInfobox( "infobox-normal", l10n("possibilitiesTitle"), contentNode, "configuration-possibilities", false); content.addChild( "a", new String[] {"href", "title"}, new String[] 
{path(), l10n("shortTitle")}, l10n("returnToNodeConfig")); content.addChild("br"); addHomepageLink(content); writeHTMLReply(ctx, 200, "OK", pageNode.generate()); }