public synchronized void run() {
  byte[] buffer = new byte[BUFFER_SIZE];
  for (;;) {
    try {
      this.wait(100);
    } catch (InterruptedException ie) {
    }
    int len = 0;
    try {
      int noBytes = pin.available();
      if (noBytes > 0) {
        len = pin.read(buffer, 0, Math.min(noBytes, BUFFER_SIZE));
        if (len > 0) {
          jTextArea.append(new String(buffer, 0, len));
          jTextArea.setCaretPosition(jTextArea.getText().length());
        }
      }
    } catch (IOException ioe) {
      throw new UIError("Unable to read from input stream! " + ioe.getMessage());
    }
  }
}
@Override public synchronized void close() throws IOException { super.close(); toTested.close(); toTester.close(); fromTested.close(); fromTester.close(); }
/* (non-Javadoc)
 * @see org.eclipse.wst.jsdt.debug.transport.TransportService#accept(org.eclipse.wst.jsdt.debug.transport.ListenerKey, long, long)
 */
public Connection accept(ListenerKey key, long attachTimeout, long handshakeTimeout)
    throws IOException {
  long timeout = attachTimeout;
  if (timeout > 0) {
    if (timeout > Integer.MAX_VALUE) {
      timeout = Integer.MAX_VALUE; // approximately 25 days!
    }
  }
  synchronized (listeners) {
    if (!listeners.containsKey(key)) {
      throw new IllegalStateException("not listening"); //$NON-NLS-1$
    }
    if (listeners.get(key) != null) {
      throw new IllegalStateException("PipedTransport only accepts one accept at a time"); //$NON-NLS-1$
    }
    PipedInputStream serveris = new PipedInputStream();
    PipedOutputStream clientos = new PipedOutputStream();
    PipedOutputStream serveros = new PipedOutputStream();
    PipedInputStream clientis = new PipedInputStream();
    serveris.connect(clientos);
    serveros.connect(clientis);
    PipedConnection clientConnection = new PipedConnection(clientis, clientos);
    PipedConnection serverConnection = new PipedConnection(serveris, serveros);
    listeners.put(key, clientConnection);
    listeners.notifyAll();
    long startTime = System.currentTimeMillis();
    while (true) {
      try {
        listeners.wait(timeout);
      } catch (InterruptedException e) {
        clientConnection.close(); // Close unused client connection (and its streams)
        serverConnection.close(); // Close unused server connection (and its streams)
        throw new IOException("accept failed: interrupted"); //$NON-NLS-1$
      }
      if (!listeners.containsKey(key)) {
        clientConnection.close(); // Close unused client connection (and its streams)
        serverConnection.close(); // Close unused server connection (and its streams)
        throw new IOException("accept failed: stopped listening"); //$NON-NLS-1$
      }
      if (listeners.get(key) != null) {
        if (System.currentTimeMillis() - startTime > timeout) {
          listeners.put(key, null);
          clientConnection.close(); // Close unused client connection (and its streams)
          serverConnection.close(); // Close unused server connection (and its streams)
          throw new IOException("accept failed: timed out"); //$NON-NLS-1$
        }
        continue;
      }
      // From this point both the server and the client are responsible to close their connections.
      return serverConnection;
    }
  }
}
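To make the pairing above easier to follow, here is a minimal, self-contained sketch of the same idea: two PipedInputStream/PipedOutputStream pairs wired into an in-memory, bidirectional channel. The class and variable names are illustrative only and are not part of the PipedTransport code.

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public class PipedPairSketch {
  public static void main(String[] args) throws IOException {
    // client -> server direction
    PipedInputStream serverIn = new PipedInputStream();
    PipedOutputStream clientOut = new PipedOutputStream(serverIn);
    // server -> client direction
    PipedInputStream clientIn = new PipedInputStream();
    PipedOutputStream serverOut = new PipedOutputStream(clientIn);

    // Each end runs on its own thread: piped streams block when the pipe
    // buffer is empty (reads) or full (writes).
    Thread server = new Thread(() -> {
      try {
        int b;
        while ((b = serverIn.read()) != -1) { // read the client's bytes...
          serverOut.write(b);                 // ...and echo each one back
        }
        serverOut.close();
      } catch (IOException ignored) {
      }
    });
    server.start();

    clientOut.write("ping".getBytes());
    clientOut.close(); // signals end-of-stream to the server

    byte[] reply = new byte[4];
    int n = 0;
    int r;
    while (n < reply.length && (r = clientIn.read(reply, n, reply.length - n)) != -1) {
      n += r;
    }
    System.out.println(new String(reply, 0, n)); // prints "ping"
  }
}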
public void pipedInputStreamNotClosedAfterRead(PipedOutputStream pout) {
  PipedInputStream pin;
  try {
    pin = new PipedInputStream(pout);
    int data = pin.read();
    pin.close();
  } catch (IOException e) {
  }
}
public void pipedInputStreamClosed(PipedOutputStream pout) throws IOException {
  PipedInputStream pin = null;
  try {
    pin = new PipedInputStream(pout);
    int data = pin.read();
  } catch (IOException e) {
  } finally {
    // Guard against the constructor having thrown, in which case pin is still null.
    if (pin != null) {
      pin.close();
    }
  }
}
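A minimal alternative sketch using try-with-resources, which closes the stream on both the normal and the exceptional path and so avoids both the leak in the first variant and the explicit finally in the second (the method name is hypothetical):

public void pipedInputStreamTryWithResources(PipedOutputStream pout) throws IOException {
  // try-with-resources closes pin even if read() throws
  try (PipedInputStream pin = new PipedInputStream(pout)) {
    int data = pin.read();
  }
}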
@Override
public void run() {
  long lastInvocation = 0;
  byte[] readBuffer = new byte[flushBufferSize];
  while (state == 0) {
    long curTime = System.currentTimeMillis();
    try {
      if ((lastInvocation + flushBufferTimeout) <= curTime) {
        lastInvocation = curTime;
        // Flush given buffer size to output buffer.
        int read = pis.read(readBuffer, 0, readBuffer.length);
        if (read > 0) {
          out.write(readBuffer, 0, read);
          out.flush();
        } else {
          break;
        }
      }
      // Sleep for a small unit of time.
      Thread.sleep(SLEEP_TIME);
    } catch (Exception ex) {
      ex.printStackTrace(System.err);
      break;
    }
  }
  System.err.println("SDS: finished, state=" + state + "; TID=" + Thread.currentThread().getId());
  // In this state keep flushing rapidly.
  if (state == 1) {
    int count;
    byte[] buffer = new byte[8192];
    try {
      while ((count = pis.read(buffer, 0, buffer.length)) > 0) {
        out.write(buffer, 0, count);
        out.flush();
      }
    } catch (Exception ex) {
      ex.printStackTrace(System.err);
    }
  }
  state = 3;
  try {
    pis.close();
    pos.close();
  } catch (IOException ex) {
    ex.printStackTrace(System.err);
  }
  System.err.println("SDS: finished, state=" + state + "; TID=" + Thread.currentThread().getId());
}
public synchronized String readLine(PipedInputStream in) throws IOException {
  String input = "";
  do {
    int available = in.available();
    if (available == 0) {
      break;
    }
    byte[] b = new byte[available];
    // Use the actual number of bytes read rather than assuming the buffer was filled.
    int read = in.read(b);
    input = input + new String(b, 0, read);
  } while (!input.endsWith("\n") && !input.endsWith("\r\n") && !quit);
  return input;
}
/**
 * *************************************************************************
 * CONCRETE METHOD:: ReadFilterInputPort
 * Purpose: This method reads data from the input port one byte at a time.
 *
 * <p>Arguments: void
 *
 * <p>Returns: byte of data read from the input port of the filter.
 *
 * <p>Exceptions: IOException, EndOfStreamException (rethrown)
 *
 * <p>**************************************************************************
 */
byte ReadFilterInputPort() throws EndOfStreamException {
  byte datum = 0;
  /*
   * Since delays are possible on upstream filters, we first wait until there is data available
   * on the input port. We check; if no data is available on the input port we wait for a
   * quarter of a second and check again. Note that there is no timeout enforced here at all,
   * and if upstream filters are deadlocked, this can result in infinite waits in this loop. It
   * is necessary to check whether we are at the end of stream in the wait loop because it is
   * possible that the upstream filter completes while we are waiting. If this happens and we
   * do not check for the end of stream, we could wait forever on an upstream pipe that is long
   * gone. Unfortunately, Java pipes do not throw exceptions when the input pipe is broken.
   */
  try {
    while (InputReadPort.available() == 0) {
      if (EndOfInputStream()) {
        throw new EndOfStreamException("End of input stream reached");
      } // if
      sleep(250);
    } // while
  } // try
  catch (EndOfStreamException Error) {
    throw Error;
  } // catch
  catch (Exception Error) {
    System.out.println("\n" + this.getName() + " Error in read port wait loop::" + Error);
  } // catch

  /*
   * If at least one byte of data is available on the input pipe we can read it. We read and
   * write one byte to and from ports.
   */
  try {
    datum = (byte) InputReadPort.read();
    return datum;
  } // try
  catch (Exception Error) {
    System.out.println("\n" + this.getName() + " Pipe read error::" + Error);
    return datum;
  } // catch
} // ReadFilterInputPort
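As the comment above notes, the wait loop enforces no timeout, so a deadlocked upstream filter stalls the read forever. A minimal sketch of the same poll-and-sleep pattern with a deadline, assuming a plain PipedInputStream (the helper name is hypothetical and not part of FilterFramework):

// Sketch: poll a PipedInputStream for data, giving up after a deadline instead
// of waiting forever on a dead upstream pipe. Names are illustrative only.
static int readWithTimeout(PipedInputStream in, long timeoutMillis)
    throws IOException, InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (in.available() == 0) {
    if (System.currentTimeMillis() > deadline) {
      throw new IOException("Timed out waiting for upstream data");
    }
    Thread.sleep(250); // same quarter-second poll interval as above
  }
  return in.read();
}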
/**
 * **************************************************
 *
 * <p>private int joinWithPossibleTimeout(ProcStarter proc, final TaskListener listener,
 * StringBuffer strBuf, AbstractBuild currentBuild) throws IOException, InterruptedException
 *
 * <p>**************************************************
 */
protected int joinWithPossibleTimeout(
    ProcStarter proc,
    final TaskListener listener,
    StringBuffer strBuf,
    AbstractBuild currentBuild,
    String stringToHide)
    throws IOException, InterruptedException {
  boolean useTimeout = configuration.isUseTimeout();
  long timeoutValue = configuration.getTimeoutValue();
  int result = -1;
  try {
    PipedInputStream pis = null;
    if (strBuf != null) {
      PipedOutputStream pos = new PipedOutputStream();
      pis = new PipedInputStream(pos, 1000000);
      proc = proc.stdout(pos);
    }
    hudson.Proc procStarted = proc.start();
    if (useTimeout) {
      result = procStarted.joinWithTimeout(timeoutValue, TimeUnit.SECONDS, listener);
    } else {
      result = procStarted.join();
    }
    if (strBuf != null) {
      byte[] stdoutDataArr = new byte[pis.available()];
      pis.read(stdoutDataArr, 0, stdoutDataArr.length);
      String stdoutStr = new String(stdoutDataArr);
      if (stringToHide != null) {
        stdoutStr = stdoutStr.replaceAll(stringToHide, "****");
      }
      strBuf.append(stdoutStr);
      PrintStream output = listener.getLogger();
      output.println(stdoutStr);
    }
  } catch (InterruptedException e) {
    throw e;
  } catch (Exception e) {
    if (listener != null) {
      listener.error("Exception caught in joinWithPossibleTimeout: " + e);
    }
  }
  return result;
} // End: joinWithPossibleTimeout(...)
public int available() { try { return reader.available(); } catch (Exception e) { return -1; } }
JREClientConnector() throws IOException { toClient_outPipeStream.connect(toClient_inPipeStream); fromClient_inPipeStream.connect(fromClient_outPipeStream); client = new ClientTelnetConnection(toClient_inPipeStream, fromClient_outPipeStream); }
public void run() {
  try {
    for (;;) {
      byte[] buf = new byte[8192];
      int l = out.read(buf);
      if (l == -1) {
        // End of stream: stop pumping instead of spinning on repeated -1 reads.
        break;
      }
      InputStreamReader r = new InputStreamReader(new ByteArrayInputStream(buf, 0, l));
      StringBuilder sb = new StringBuilder();
      for (;;) {
        int c = r.read();
        if (c == -1) {
          break;
        }
        sb.append((char) c);
      }
      if (sb.length() > 0) {
        terminal.write(sb.toString());
      }
      String s = terminal.read();
      if (s != null && s.length() > 0) {
        for (byte b : s.getBytes()) {
          in.write(b);
        }
      }
    }
  } catch (IOException e) {
    closed = true;
  }
}
public void run() { try { snk.read(); } catch (Exception e) { System.err.println("Test failed: unexpected exception"); } return; }
/**
 * *************************************************************************
 * CONCRETE METHOD:: ClosePorts
 * Purpose: This method is used to close the input and output ports of the filter. It is
 * important that filters close their ports before the filter thread exits.
 *
 * <p>Arguments: void
 *
 * <p>Returns: void
 *
 * <p>Exceptions: IOException
 *
 * <p>**************************************************************************
 */
void ClosePorts() {
  try {
    InputReadPort.close();
    OutputWritePort.close();
  } catch (Exception Error) {
    System.out.println("\n" + this.getName() + " ClosePorts error::" + Error);
  } // catch
} // ClosePorts
private void closeIO() {
  try {
    fMIOutConsolePipe.close();
  } catch (IOException e) {
  }
  try {
    fMIInConsolePipe.close();
  } catch (IOException e) {
  }
  try {
    fMIOutLogPipe.close();
  } catch (IOException e) {
  }
  try {
    fMIInLogPipe.close();
  } catch (IOException e) {
  }
}
public String read(int nbytes) {
  byte[] buf = new byte[nbytes];
  try {
    // Use the actual number of bytes read; a short read would otherwise leave
    // trailing zero bytes in the returned string.
    int read = reader.read(buf, 0, nbytes);
    if (read < 0) {
      return "";
    }
    return new String(buf, 0, read, "UTF-8");
  } catch (IOException e) {
    System.out.println("Exception reading info");
    return "ERROR";
  }
}
@Override public void close() { closed = true; try { super.close(); out.close(); } catch (IOException e) { logger.error("Unexpected IO exception", e); } }
/**
 * *************************************************************************
 * CONCRETE METHOD:: Connect
 * Purpose: This method connects filters to each other. All connections are through the input
 * port of each filter. That is, each filter's input port is connected to another filter's
 * output port through this method.
 *
 * <p>Arguments: FilterFramework - this is the filter that this filter will connect to.
 *
 * <p>Returns: void
 *
 * <p>Exceptions: IOException
 *
 * <p>**************************************************************************
 */
void Connect(FilterFramework Filter) {
  try {
    // Connect this filter's input to the upstream pipe's output stream
    InputReadPort.connect(Filter.OutputWritePort);
    InputFilter = Filter;
  } // try
  catch (Exception Error) {
    System.out.println("\n" + this.getName() + " FilterFramework error connecting::" + Error);
  } // catch
} // Connect
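For context, a stripped-down sketch of the wiring that Connect performs: a downstream PipedInputStream is connect()-ed to an upstream PipedOutputStream, each end is driven by its own thread, and close() on the writer signals end-of-stream to the reader. All names are illustrative and not part of FilterFramework.

public static void main(String[] args) throws IOException {
  PipedOutputStream upstreamOut = new PipedOutputStream();
  PipedInputStream downstreamIn = new PipedInputStream();
  downstreamIn.connect(upstreamOut); // same call the Connect method relies on

  // Source thread: writes bytes into its output pipe, then closes it.
  Thread source = new Thread(() -> {
    try {
      for (byte b = 0; b < 10; b++) {
        upstreamOut.write(b);
      }
      upstreamOut.close(); // end-of-stream for the reader
    } catch (IOException ignored) {
    }
  });

  // Sink thread: reads until end-of-stream.
  Thread sink = new Thread(() -> {
    try {
      int datum;
      while ((datum = downstreamIn.read()) != -1) {
        System.out.println("received " + datum);
      }
    } catch (IOException ignored) {
    }
  });

  source.start();
  sink.start();
}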
public static void main(String[] argv) throws Exception {
  PipedOutputStream os = new PipedOutputStream();
  PipedInputStream is = new PipedInputStream();
  is.connect(os);

  // create reader thread
  LazyReader lr = new LazyReader(is);

  os.write(new byte[1000]);
  lr.start();
  while (lr.isAlive()) {
    Thread.sleep(100);
  }
  try {
    os.write(27);
    throw new Exception("Test failed: shouldn't be able to write");
  } catch (IOException e) {
    // test passed
  }
}
public static void main(String[] argv) throws InterruptedException {
  // Set up a reasonable buffer size for this test if one is not already specified
  String prop = System.getProperty("gnu.java.io.pipe_size");
  // if (prop == null)
  //   System.setProperty("gnu.java.io.pipe_size", "32");

  try {
    System.out.println("Started test of PipedInputStream and PipedOutputStream");
    System.out.println("Test 1: Basic piped stream test");

    // Set up the thread to write
    PipedStreamTestWriter pstw = new PipedStreamTestWriter();
    String str = pstw.getStr();
    PipedOutputStream pos = pstw.getStream();

    // Now set up our reader
    PipedInputStream pis = new PipedInputStream();
    pis.connect(pos);
    new Thread(pstw).start();

    byte[] buf = new byte[12];
    int bytes_read, total_read = 0;
    while ((bytes_read = pis.read(buf)) != -1) {
      System.out.print(new String(buf, 0, bytes_read));
      System.out.flush();
      Thread.sleep(10); // A short delay
      total_read += bytes_read;
    }

    if (total_read == str.length())
      System.out.println("PASSED: Basic piped stream test");
    else
      System.out.println("FAILED: Basic piped stream test");
  } catch (IOException e) {
    System.out.println("FAILED: Basic piped stream test: " + e);
  }
}
@Override
public void process() throws Exception {
  log.trace("process()");

  HandlerThread handlerThread = new HandlerThread();
  handlerThread.setName(Thread.currentThread().getName() + "-soap");
  handlerThread.start();
  try {
    // Wait for the request SOAP message to be parsed before we can start sending stuff.
    waitForSoapMessage();

    // If the handler thread excepted, do not continue.
    checkError();

    // Verify that the client is registered
    verifyClientStatus();

    // Check client authentication mode
    verifyClientAuthentication();

    // If the message is synchronous, start sending proxy message
    if (!isAsync) {
      processRequest();
    }

    if (response != null) {
      sendResponse();
    }
  } catch (Exception e) {
    if (reqIns != null) {
      reqIns.close();
    }
    // Let's interrupt the handler thread so that it won't block forever
    // waiting for us to do something.
    handlerThread.interrupt();
    throw e;
  } finally {
    handlerThread.join();
    if (response != null) {
      response.consume();
    }
  }
}
/**
 * The local connection was meant to use PipeStreams instead of initiateQueueDataExchange. As I
 * had issues, I resorted to Queues. Unfortunately, this means that the protocol handshake is
 * written twice.
 */
private void initiate() {
  try {
    logger.log(Level.INFO, "Starting the server Protocol");
    serverProtocol.startProtocol(outputStream, inputStream);
  } catch (UnexpectedProtocolException e) {
    logger.log(Level.SEVERE, "Server received an incorrect protocol from the client.");
    e.printStackTrace();
  } catch (IOException e) {
    logger.log(Level.SEVERE, "Error trying to start the server protocol");
    e.printStackTrace();
  } finally {
    try {
      logger.log(Level.INFO, "Server closing streams");
      outputStream.close();
      inputStream.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
public static void main(String[] args) throws IOException {
  // TODO Auto-generated method stub
  Thread t = new sumulator();
  t.start();
  System.out.println("in... initDat");
  // 1. Organize the data
  // 2. Build a simple data structure
  // 3. Create a thread that modifies the data structure (it holds the policy)
  // 4. Wait in the main flow with an IPC while loop
  while (true) {
    // try {
    //   Thread.sleep(1000);
    // } catch (InterruptedException e) {
    //   // TODO Auto-generated catch block
    //   e.printStackTrace();
    // }

    // 1. Define the command
    // 2. Parse the command
    // 3. Branch according to the command
    // String readCommand = null;
    // pIutputStream.read(readCommand.getBytes());
    int a = pIutputStream.read();
    // pOutputStream.write("hello".getBytes());
    pOutputStream.write(9999);
    // System.out.println(" nNumRunning: " + nNumRunning + " nNumIdel: "
    //     + nNumRunning + " nNumUnhealthy: " + nNumUnhealthy
    //     + " nNumAvailable: " + nNumAvailable + " nNumQueue: " + nNumQueue);
  }
}
private void sendRequest(HttpSender httpSender) throws Exception {
  log.trace("sendRequest()");
  try {
    // If we're using SSL, we need to include the provider name in the HTTP request so that
    // server proxy could verify the SSL certificate properly.
    if (SystemProperties.isSslEnabled()) {
      httpSender.setAttribute(AuthTrustVerifier.ID_PROVIDERNAME, requestServiceId);
    }

    // Start sending the request to server proxies. The underlying SSLConnectionSocketFactory
    // will select the fastest address (socket that connects first) from the provided addresses.
    // Dummy service address is only needed so that host name resolving could do its thing and
    // start the ssl connection.
    URI[] addresses = getServiceAddresses(requestServiceId, requestSoap.getSecurityServer());
    httpSender.setAttribute(ID_TARGETS, addresses);
    httpSender.setTimeout(SystemProperties.getClientProxyTimeout());

    httpSender.addHeader(HEADER_HASH_ALGO_ID, getHashAlgoId());
    httpSender.addHeader(HEADER_PROXY_VERSION, ProxyMain.getVersion());

    try {
      httpSender.doPost(getDummyServiceAddress(addresses), reqIns, CHUNKED_LENGTH, outputContentType);
    } catch (Exception e) {
      // Failed to connect to server proxy
      MonitorAgent.serverProxyFailed(createRequestMessageInfo());
      // Rethrow
      throw e;
    }
  } finally {
    if (reqIns != null) {
      reqIns.close();
    }
  }
}
/**
 * Extract data to cache. Call synchronized on ctx.
 *
 * @param key The key the data was fetched from.
 * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
 * @param data The actual data fetched.
 * @param archiveContext The context for the whole fetch process.
 * @param ctx The ArchiveStoreContext for this key.
 * @param element A particular element that the caller is especially interested in, or null.
 * @param callback A callback to be called if we find that element, or if we don't.
 * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
 * @throws ArchiveRestartException If the request needs to be restarted because the archive
 *     changed.
 */
public void extractToCache(
    FreenetURI key,
    ARCHIVE_TYPE archiveType,
    COMPRESSOR_TYPE ctype,
    final Bucket data,
    ArchiveContext archiveContext,
    ArchiveStoreContext ctx,
    String element,
    ArchiveExtractCallback callback,
    ClientContext context)
    throws ArchiveFailureException, ArchiveRestartException {
  logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
  MutableBoolean gotElement = element != null ? new MutableBoolean() : null;
  if (logMINOR) Logger.minor(this, "Extracting " + key);
  ctx.removeAllCachedItems(this); // flush cache anyway
  final long expectedSize = ctx.getLastSize();
  final long archiveSize = data.size();
  /*
   * Set if we need to throw a RestartedException rather than returning success, after we have
   * unpacked everything.
   */
  boolean throwAtExit = false;
  if ((expectedSize != -1) && (archiveSize != expectedSize)) {
    throwAtExit = true;
    ctx.setLastSize(archiveSize);
  }
  byte[] expectedHash = ctx.getLastHash();
  if (expectedHash != null) {
    byte[] realHash;
    try {
      realHash = BucketTools.hash(data);
    } catch (IOException e) {
      throw new ArchiveFailureException("Error reading archive data: " + e, e);
    }
    if (!Arrays.equals(realHash, expectedHash)) throwAtExit = true;
    ctx.setLastHash(realHash);
  }
  if (archiveSize > archiveContext.maxArchiveSize)
    throw new ArchiveFailureException(
        "Archive too big (" + archiveSize + " > " + archiveContext.maxArchiveSize + ")!");
  else if (archiveSize <= 0)
    throw new ArchiveFailureException("Archive too small! (" + archiveSize + ')');
  else if (logMINOR)
    Logger.minor(this, "Container size (possibly compressed): " + archiveSize + " for " + data);
  InputStream is = null;
  try {
    final ExceptionWrapper wrapper;
    if ((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
      if (logMINOR) Logger.minor(this, "No compression");
      is = data.getInputStream();
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.BZIP2) {
      if (logMINOR) Logger.minor(this, "dealing with BZIP2");
      is = new BZip2CompressorInputStream(data.getInputStream());
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.GZIP) {
      if (logMINOR) Logger.minor(this, "dealing with GZIP");
      is = new GZIPInputStream(data.getInputStream());
      wrapper = null;
    } else if (ctype == COMPRESSOR_TYPE.LZMA_NEW) {
      // LZMA internally uses pipe streams, so we may as well do it here.
      // In fact we need to for LZMA_NEW, because of the properties bytes.
      PipedInputStream pis = new PipedInputStream();
      PipedOutputStream pos = new PipedOutputStream();
      pis.connect(pos);
      final OutputStream os = new BufferedOutputStream(pos);
      wrapper = new ExceptionWrapper();
      context.mainExecutor.execute(
          new Runnable() {
            @Override
            public void run() {
              InputStream is = null;
              try {
                Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(
                    is = data.getInputStream(), os, data.size(), expectedSize);
              } catch (CompressionOutputSizeException e) {
                Logger.error(this, "Failed to decompress archive: " + e, e);
                wrapper.set(e);
              } catch (IOException e) {
                Logger.error(this, "Failed to decompress archive: " + e, e);
                wrapper.set(e);
              } finally {
                try {
                  os.close();
                } catch (IOException e) {
                  Logger.error(this, "Failed to close PipedOutputStream: " + e, e);
                }
                Closer.close(is);
              }
            }
          });
      is = pis;
    } else if (ctype == COMPRESSOR_TYPE.LZMA) {
      if (logMINOR) Logger.minor(this, "dealing with LZMA");
      is = new LzmaInputStream(data.getInputStream());
      wrapper = null;
    } else {
      wrapper = null;
    }
    if (ARCHIVE_TYPE.ZIP == archiveType)
      handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
    else if (ARCHIVE_TYPE.TAR == archiveType)
      handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
    else
      throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
    if (wrapper != null) {
      Exception e = wrapper.get();
      if (e != null)
        throw new ArchiveFailureException("An exception occured decompressing: " + e.getMessage(), e);
    }
  } catch (IOException ioe) {
    throw new ArchiveFailureException("An IOE occured: " + ioe.getMessage(), ioe);
  } finally {
    Closer.close(is);
  }
}
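The LZMA_NEW branch above follows a common piped-stream idiom: a background task writes decompressed bytes into a PipedOutputStream while the caller reads the connected PipedInputStream, and any producer-side exception is captured so the consumer can rethrow it. A stripped-down sketch of that idiom, using hypothetical helper names (executor, produceInto, consumeFrom) and a plain AtomicReference in place of ExceptionWrapper:

// Sketch: pump a producer's output through a pipe to the consumer, capturing
// producer failures so the consumer can surface them. Names are illustrative.
PipedInputStream pipeIn = new PipedInputStream();
final PipedOutputStream pipeOut = new PipedOutputStream(pipeIn);
final AtomicReference<Exception> producerError = new AtomicReference<>();

executor.execute(() -> {
  try {
    produceInto(pipeOut); // hypothetical: writes the decompressed bytes into the pipe
  } catch (Exception e) {
    producerError.set(e);
  } finally {
    try {
      pipeOut.close(); // lets the consumer's read() observe end-of-stream
    } catch (IOException ignored) {
    }
  }
});

consumeFrom(pipeIn); // hypothetical: reads until end-of-stream
Exception e = producerError.get();
if (e != null) {
  throw new IOException("producer failed", e);
}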
public int read() throws IOException { return pIn.read(); }
public void closePipedInputStream() throws IOException { pIn.close(); }