public static void testWrite() {
  CopyOnWriteArrayList<String> list = new CopyOnWriteArrayList<>();
  list.add("k1:v1");
  list.add("k2:v2");
  Object obj0 = list.get(0);
  list.add("k3:v3");
  list.set(0, "k1:vvvvvvv");
  System.out.println(list.get(0).hashCode() + "@" + list.get(0));
  System.out.println(obj0.hashCode() + "@" + obj0);
}
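// A minimal companion sketch (not part of the snippet above) illustrating the copy-on-write
// semantics the test relies on: iterators of CopyOnWriteArrayList are bound to the backing array
// that existed when they were created, so get(0) reflects the set(...) call while an earlier
// iterator still sees the old elements. Class and variable names here are illustrative only.
import java.util.Iterator;
import java.util.concurrent.CopyOnWriteArrayList;

public class SnapshotIteratorDemo {
  public static void main(String[] args) {
    CopyOnWriteArrayList<String> list = new CopyOnWriteArrayList<>();
    list.add("k1:v1");
    list.add("k2:v2");

    Iterator<String> it = list.iterator(); // snapshot of the current backing array

    list.set(0, "k1:vvvvvvv"); // mutation creates a new backing array
    list.add("k3:v3");

    System.out.println(list.get(0)); // k1:vvvvvvv (reads the new array)
    while (it.hasNext()) {
      System.out.println(it.next()); // k1:v1, k2:v2 (old snapshot, no k3:v3)
    }
  }
}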
private void sort() {
  // Simple bubble sort of the pending updates by update priority.
  GlobalObject temp;
  if (toUpdate.size() > 1) {
    for (int i = 0; i < toUpdate.size(); i++) {
      for (int j = 0; j < toUpdate.size() - i - 1; j++) {
        if (toUpdate.get(j).compareTo(toUpdate.get(j + 1).getUpdatePriority(), 0) > 0) {
          temp = toUpdate.get(j);
          toUpdate.set(j, toUpdate.get(j + 1));
          toUpdate.set(j + 1, temp);
        }
      }
    }
  }
}
/**
 * Step 1: look up the subscriber list for this event type in subscriptionsByEventType and insert
 * the new subscription into that list according to its priority.
 * Step 2: look up the list of event types this subscriber is registered for in typesBySubscriber
 * and add this event type to it, so it can be used later to unregister the subscriber.
 * Step 3: if the event is sticky, take the most recent event of this type from the stickyEvents
 * map and deliver it to the new subscriber immediately.
 *
 * @param subscriber
 * @param subscriberMethod
 * @param sticky
 * @param priority
 */
private void subscribe(
    Object subscriber, SubscriberMethod subscriberMethod, boolean sticky, int priority) {
  Class<?> eventType = subscriberMethod.eventType;
  // Look up the subscriber list for this event type in subscriptionsByEventType.
  CopyOnWriteArrayList<Subscription> subscriptions = subscriptionsByEventType.get(eventType);
  Subscription newSubscription = new Subscription(subscriber, subscriberMethod, priority);
  // Create the entry for this eventType in subscriptionsByEventType if it is missing; otherwise
  // reject duplicate registrations.
  if (subscriptions == null) {
    subscriptions = new CopyOnWriteArrayList<Subscription>();
    subscriptionsByEventType.put(eventType, subscriptions);
  } else {
    if (subscriptions.contains(newSubscription)) {
      throw new EventBusException(
          "Subscriber " + subscriber.getClass() + " already registered to event " + eventType);
    }
  }

  // Starting with EventBus 2.2 we enforced methods to be public (might change with annotations
  // again)
  // subscriberMethod.method.setAccessible(true);

  // Insert the new subscription into the subscriber list according to its priority.
  int size = subscriptions.size();
  for (int i = 0; i <= size; i++) {
    if (i == size || newSubscription.priority > subscriptions.get(i).priority) {
      subscriptions.add(i, newSubscription);
      break;
    }
  }

  // Record this event type in typesBySubscriber (the list of event types this subscriber is
  // registered for); it is used later when unregistering.
  List<Class<?>> subscribedEvents = typesBySubscriber.get(subscriber);
  if (subscribedEvents == null) {
    subscribedEvents = new ArrayList<Class<?>>();
    typesBySubscriber.put(subscriber, subscribedEvents);
  }
  subscribedEvents.add(eventType);

  // If the event is sticky, take the most recent event of this type from the stickyEvents map
  // and deliver it to the new subscriber.
  if (sticky) {
    if (eventInheritance) {
      // Existing sticky events of all subclasses of eventType have to be considered.
      // Note: Iterating over all events may be inefficient with lots of sticky events,
      // thus data structure should be changed to allow a more efficient lookup
      // (e.g. an additional map storing sub classes of super classes: Class -> List<Class>).
      Set<Map.Entry<Class<?>, Object>> entries = stickyEvents.entrySet();
      for (Map.Entry<Class<?>, Object> entry : entries) {
        Class<?> candidateEventType = entry.getKey();
        // isAssignableFrom checks whether eventType is the same as, or a superclass or
        // superinterface of, candidateEventType.
        if (eventType.isAssignableFrom(candidateEventType)) {
          Object stickyEvent = entry.getValue();
          checkPostStickyEventToSubscription(newSubscription, stickyEvent);
        }
      }
    } else {
      Object stickyEvent = stickyEvents.get(eventType);
      checkPostStickyEventToSubscription(newSubscription, stickyEvent);
    }
  }
}
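// A hedged usage sketch for the subscribe(...) path above, written against the EventBus 2.x
// conventions it belongs to (handler methods named onEvent*, registerSticky/postSticky for sticky
// delivery). NetworkStateScreen and NetworkStateEvent are invented example names, and the exact
// register overloads may differ between EventBus 2.x releases.
public class NetworkStateScreen {

  public static class NetworkStateEvent {
    public final boolean connected;

    public NetworkStateEvent(boolean connected) {
      this.connected = connected;
    }
  }

  public void onStart() {
    // Triggers subscribe(...) for each onEvent* method; because this uses sticky registration,
    // the last NetworkStateEvent posted with postSticky(...) is replayed immediately (step 3).
    de.greenrobot.event.EventBus.getDefault().registerSticky(this);
  }

  public void onStop() {
    de.greenrobot.event.EventBus.getDefault().unregister(this);
  }

  // Called on the posting thread (the EventBus 2.x default thread mode).
  public void onEvent(NetworkStateEvent event) {
    System.out.println("connected=" + event.connected);
  }
}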
/** {@inheritDoc} */
public IPlayItem getItem(int index) {
  try {
    return items.get(index);
  } catch (IndexOutOfBoundsException e) {
    return null;
  }
}
/**
 * Removes the specified driver from the {@code DriverManager}'s list of registered drivers.
 *
 * <p>If a {@code null} value is specified for the driver to be removed, then no action is taken.
 *
 * <p>If a security manager exists and its {@code checkPermission} denies permission, then a
 * {@code SecurityException} will be thrown.
 *
 * <p>If the specified driver is not found in the list of registered drivers, then no action is
 * taken. If the driver was found, it will be removed from the list of registered drivers.
 *
 * <p>If a {@code DriverAction} instance was specified when the JDBC driver was registered, its
 * deregister method will be called prior to the driver being removed from the list of registered
 * drivers.
 *
 * @param driver the JDBC Driver to remove
 * @exception SQLException if a database access error occurs
 * @throws SecurityException if a security manager exists and its {@code checkPermission} method
 *     denies permission to deregister a driver.
 * @see SecurityManager#checkPermission
 */
@CallerSensitive
public static synchronized void deregisterDriver(Driver driver) throws SQLException {
  if (driver == null) {
    return;
  }
  SecurityManager sec = System.getSecurityManager();
  if (sec != null) {
    sec.checkPermission(DEREGISTER_DRIVER_PERMISSION);
  }
  println("DriverManager.deregisterDriver: " + driver);

  DriverInfo aDriver = new DriverInfo(driver, null);
  if (registeredDrivers.contains(aDriver)) {
    if (isDriverAllowed(driver, Reflection.getCallerClass())) {
      DriverInfo di = registeredDrivers.get(registeredDrivers.indexOf(aDriver));
      // If a DriverAction was specified, call it to notify the
      // driver that it has been deregistered
      if (di.action() != null) {
        di.action().deregister();
      }
      registeredDrivers.remove(aDriver);
    } else {
      // If the caller does not have permission to load the driver then
      // throw a SecurityException.
      throw new SecurityException();
    }
  } else {
    println(" couldn't find driver to unload");
  }
}
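// A short companion sketch showing how deregisterDriver(...) is typically used: walking
// DriverManager.getDrivers() and deregistering each driver, e.g. when a web application is
// undeployed, to avoid classloader leaks. DriverCleanup is an illustrative name, not part of the
// JDK.
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Enumeration;

public class DriverCleanup {
  public static void deregisterAll() {
    Enumeration<Driver> drivers = DriverManager.getDrivers();
    while (drivers.hasMoreElements()) {
      Driver driver = drivers.nextElement();
      try {
        // A no-op for drivers this code's classloader is not allowed to see (isDriverAllowed).
        DriverManager.deregisterDriver(driver);
      } catch (SQLException e) {
        // keep going; one failing driver should not prevent the others from being deregistered
        e.printStackTrace();
      }
    }
  }
}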
/** {@inheritDoc} */
public void previousItem() {
  stop();
  moveToPrevious();
  if (currentItemIndex == -1) {
    return;
  }
  IPlayItem item = items.get(currentItemIndex);
  play(item);
}
/** {@inheritDoc} */
public void nextItem() {
  stop();
  moveToNext();
  if (currentItemIndex == -1) {
    return;
  }
  IPlayItem item = items.get(currentItemIndex);
  play(item);
}
/** {@inheritDoc} */
public void setItem(int index) {
  if (index < 0 || index >= items.size()) {
    return;
  }
  stop();
  currentItemIndex = index;
  IPlayItem item = items.get(currentItemIndex);
  play(item);
}
public void removeDialog(Dialog d) {
  for (int i = 0; i < dialogs.size(); i++) {
    DialogWrapper p = dialogs.get(i);
    if (p.getDialog().equals(d)) {
      p.release();
      dialogs.remove(i);
      return;
    }
  }
}
/**
 * Disassociates given {@link View}. If view is not associated, nothing happens.
 *
 * @param view View to be disassociated
 */
public void removeView(final View view) {
  for (int i = 0; i < mViewList.size(); i++) {
    final ViewWeakReference reference = mViewList.get(i);
    final View item = reference.get();
    if (item == null || item == view) {
      // Always remove null references to reduce list size
      mViewList.remove(reference);
      // Step back so the element shifted into this slot is not skipped.
      i--;
    }
  }
}
public FinalDungeon(int size) {
  dungeon = new ArrayList<Room>();
  mobs = new CopyOnWriteArrayList<Monster>();
  dID = 600;
  dungeon.add(new Room(600, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(601, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(602, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(603, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(604, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(605, "", "icons" + File.separator + "default.png"));
  dungeon.add(new Room(606, "", "icons" + File.separator + "default.png"));
  entrance = dungeon.get(0);
  dungeon.get(0).setSouth(dungeon.get(5));
  dungeon.get(0).setNorth(dungeon.get(6));
  dungeon.get(0).setWest(dungeon.get(1));
  dungeon.get(0).setEast(dungeon.get(2));
  dungeon.get(0).setUp(dungeon.get(3));
  dungeon.get(0).setDown(dungeon.get(4));
  dungeon.get(1).setEast(dungeon.get(0));
  dungeon.get(2).setWest(dungeon.get(0));
  dungeon.get(3).setDown(dungeon.get(0));
  dungeon.get(4).setUp(dungeon.get(0));
  dungeon.get(5).setNorth(dungeon.get(0));
  dungeon.get(6).setSouth(dungeon.get(0));
  mobs.add(new Andariel());
  mobs.add(new Astarte());
  mobs.add(new Ayperos());
  mobs.add(new Caliadne());
  mobs.add(new Eligos());
  mobs.add(new Orobas());
  mobs.get(0).setRoom(dungeon.get(6));
  mobs.get(1).setRoom(dungeon.get(5));
  mobs.get(2).setRoom(dungeon.get(3));
  mobs.get(3).setRoom(dungeon.get(1));
  mobs.get(4).setRoom(dungeon.get(2));
  mobs.get(5).setRoom(dungeon.get(4));
}
/**
 * Associates given {@link View}. If view has been already added, nothing happens.
 *
 * @param view View to be associated
 */
public void addView(final View view) {
  for (int i = 0; i < mViewList.size(); i++) {
    final ViewWeakReference reference = mViewList.get(i);
    final View item = reference.get();
    if (item == null) {
      // Always remove null references to reduce list size
      mViewList.remove(reference);
      // Step back so the element shifted into this slot is not skipped.
      i--;
    }
  }
  mViewList.addIfAbsent(new ViewWeakReference(view));
}
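// A self-contained sketch of the same weak-reference bookkeeping used by addView/removeView above,
// assuming (as the code suggests) a CopyOnWriteArrayList of weak references whose referents are
// compared by identity. Iterating backwards means remove(...) never shifts an element into a slot
// that has not been visited yet. WeakList is an illustrative name, not part of the library.
import java.lang.ref.WeakReference;
import java.util.concurrent.CopyOnWriteArrayList;

public class WeakList<T> {
  private final CopyOnWriteArrayList<WeakReference<T>> refs =
      new CopyOnWriteArrayList<WeakReference<T>>();

  /** Adds the item unless it is already tracked, pruning cleared references on the way. */
  public void add(T item) {
    for (int i = refs.size() - 1; i >= 0; i--) {
      T existing = refs.get(i).get();
      if (existing == null) {
        refs.remove(i); // drop stale entries to keep the list small
      } else if (existing == item) {
        return; // already associated, nothing to do
      }
    }
    refs.add(new WeakReference<T>(item));
  }

  /** Removes the item (and any cleared references encountered along the way). */
  public void remove(T item) {
    for (int i = refs.size() - 1; i >= 0; i--) {
      T existing = refs.get(i).get();
      if (existing == null || existing == item) {
        refs.remove(i);
      }
    }
  }
}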
private synchronized void updateLayers() {
  mLayers = new Layer[mLayerList.size()];

  int numRenderLayers = 0;
  for (int i = 0, n = mLayerList.size(); i < n; i++) {
    Layer o = mLayerList.get(i);
    if (o.getRenderer() != null) numRenderLayers++;
    mLayers[i] = o;
  }

  mLayerRenderer = new LayerRenderer[numRenderLayers];
  for (int i = 0, cnt = 0, n = mLayerList.size(); i < n; i++) {
    Layer o = mLayerList.get(i);
    LayerRenderer l = o.getRenderer();
    if (l != null) mLayerRenderer[cnt++] = l;
  }

  mDirtyLayers = false;
}
/** {@inheritDoc} */
public void addItem(IPlayItem item, int index) {
  // guard against appending at the end of the list, where there is no previous item to inspect
  IPlayItem prev = index < items.size() ? items.get(index) : null;
  if (prev instanceof SimplePlayItem) {
    // since it replaces the item in the current spot, reset the item's time so the sort will work
    ((SimplePlayItem) item).setCreated(((SimplePlayItem) prev).getCreated() - 1);
  }
  items.add(index, item);
  if (index <= currentItemIndex) {
    // item was added before the currently playing item
    currentItemIndex++;
  }
}
@Override
public void unscheduleDrawable(final Drawable who, final Runnable what) {
  for (int i = 0; i < mViewList.size(); i++) {
    final ViewWeakReference reference = mViewList.get(i);
    final View view = reference.get();
    if (view != null) {
      view.unscheduleDrawable(who);
    } else {
      // Always remove null references to reduce list size
      mViewList.remove(reference);
      // Step back so the element shifted into this slot is not skipped.
      i--;
    }
  }
}
/*
 * Removes this instance from the "connections" class variable held by
 * CommunicationWebsocketServlet.
 */
private void removeConnection() {
  Enumeration<String> keys = CommunicationWebsocketServlet.connections.keys();
  while (keys.hasMoreElements()) {
    String key = keys.nextElement();
    CopyOnWriteArrayList<CommunicationWebsocketBusiness> conn =
        CommunicationWebsocketServlet.connections.get(key);
    for (int i = 0; i < conn.size(); i++) {
      CommunicationWebsocketBusiness c = conn.get(i);
      if (c.connection == this.connection) {
        conn.remove(c);
        System.out.println("Closed conn");
      }
    }
  }
}
public void sendMessageToAllConnections(String message) {
  Enumeration<String> keys = CommunicationWebsocketServlet.connections.keys();
  while (keys.hasMoreElements()) {
    String key = keys.nextElement();
    CopyOnWriteArrayList<CommunicationWebsocketBusiness> conn =
        CommunicationWebsocketServlet.connections.get(key);
    for (int i = 0; i < conn.size(); i++) {
      CommunicationWebsocketBusiness c = conn.get(i);
      try {
        if (c.connection != this.connection) {
          c.connection.sendMessage(message);
          c.text = message;
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
}
@Override
public Object get(final int index) {
  return copyOnWriteArrayList.get(index);
}
private void doSnapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
  ImmutableMap<String, BlobMetaData> blobs;
  try {
    blobs = blobContainer.listBlobs();
  } catch (IOException e) {
    throw new IndexShardGatewaySnapshotFailedException(shardId, "failed to list blobs", e);
  }

  long generation = findLatestFileNameGeneration(blobs);
  CommitPoints commitPoints = buildCommitPoints(blobs);

  currentSnapshotStatus.index().startTime(System.currentTimeMillis());
  currentSnapshotStatus.updateStage(SnapshotStatus.Stage.INDEX);

  final SnapshotIndexCommit snapshotIndexCommit = snapshot.indexCommit();
  final Translog.Snapshot translogSnapshot = snapshot.translogSnapshot();

  final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
  final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
  final List<CommitPoint.FileInfo> indexCommitPointFiles = Lists.newArrayList();

  int indexNumberOfFiles = 0;
  long indexTotalFilesSize = 0;
  for (final String fileName : snapshotIndexCommit.getFiles()) {
    StoreFileMetaData md;
    try {
      md = store.metaData(fileName);
    } catch (IOException e) {
      throw new IndexShardGatewaySnapshotFailedException(
          shardId, "Failed to get store file metadata", e);
    }

    boolean snapshotRequired = false;
    if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
      snapshotRequired = true; // we want to always snapshot the segment file if the index changed
    }

    CommitPoint.FileInfo fileInfo = commitPoints.findPhysicalIndexFile(fileName);
    if (fileInfo == null
        || !fileInfo.isSame(md)
        || !commitPointFileExistsInBlobs(fileInfo, blobs)) {
      // the file does not exist in any commit point, or has a different length, or does not
      // fully exist in the listed blobs
      snapshotRequired = true;
    }

    if (snapshotRequired) {
      indexNumberOfFiles++;
      indexTotalFilesSize += md.length();
      // create a new FileInfo
      try {
        CommitPoint.FileInfo snapshotFileInfo =
            new CommitPoint.FileInfo(
                fileNameFromGeneration(++generation), fileName, md.length(), md.checksum());
        indexCommitPointFiles.add(snapshotFileInfo);
        snapshotFile(snapshotIndexCommit.getDirectory(), snapshotFileInfo, indexLatch, failures);
      } catch (IOException e) {
        failures.add(e);
        indexLatch.countDown();
      }
    } else {
      indexCommitPointFiles.add(fileInfo);
      indexLatch.countDown();
    }
  }
  currentSnapshotStatus.index().files(indexNumberOfFiles, indexTotalFilesSize);

  try {
    indexLatch.await();
  } catch (InterruptedException e) {
    failures.add(e);
  }
  if (!failures.isEmpty()) {
    throw new IndexShardGatewaySnapshotFailedException(
        shardId(), "Failed to perform snapshot (index files)", failures.get(failures.size() - 1));
  }

  currentSnapshotStatus
      .index()
      .time(System.currentTimeMillis() - currentSnapshotStatus.index().startTime());

  currentSnapshotStatus.updateStage(SnapshotStatus.Stage.TRANSLOG);
  currentSnapshotStatus.translog().startTime(System.currentTimeMillis());

  // Note: we assume the snapshot always starts from "base 0"; we would need to seek forward to
  // lastTranslogPosition if we wanted only the delta.
  List<CommitPoint.FileInfo> translogCommitPointFiles = Lists.newArrayList();
  int expectedNumberOfOperations = 0;
  boolean snapshotRequired = false;
  if (snapshot.newTranslogCreated()) {
    if (translogSnapshot.lengthInBytes() > 0) {
      snapshotRequired = true;
      expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
    }
  } else {
    // if we have a commit point, check that we have all the files listed in it in the blob store
    if (!commitPoints.commits().isEmpty()) {
      CommitPoint commitPoint = commitPoints.commits().get(0);
      boolean allTranslogFilesExists = true;
      for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
        if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
          allTranslogFilesExists = false;
          break;
        }
      }
      // if everything exists, we can seek forward in case there are new operations, otherwise, we
      // copy over all again...
      if (allTranslogFilesExists) {
        translogCommitPointFiles.addAll(commitPoint.translogFiles());
        if (snapshot.sameTranslogNewOperations()) {
          translogSnapshot.seekForward(snapshot.lastTranslogLength());
          if (translogSnapshot.lengthInBytes() > 0) {
            snapshotRequired = true;
            expectedNumberOfOperations =
                translogSnapshot.estimatedTotalOperations()
                    - snapshot.lastTotalTranslogOperations();
          }
        } // else (no operations, nothing to snapshot)
      } else {
        // a full translog snapshot is required
        if (translogSnapshot.lengthInBytes() > 0) {
          expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
          snapshotRequired = true;
        }
      }
    } else {
      // no commit point, snapshot all the translog
      if (translogSnapshot.lengthInBytes() > 0) {
        expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
        snapshotRequired = true;
      }
    }
  }
  currentSnapshotStatus.translog().expectedNumberOfOperations(expectedNumberOfOperations);

  if (snapshotRequired) {
    CommitPoint.FileInfo addedTranslogFileInfo =
        new CommitPoint.FileInfo(
            fileNameFromGeneration(++generation),
            "translog-" + translogSnapshot.translogId(),
            translogSnapshot.lengthInBytes(),
            null /* no need for checksum in translog */);
    translogCommitPointFiles.add(addedTranslogFileInfo);
    try {
      snapshotTranslog(translogSnapshot, addedTranslogFileInfo);
    } catch (Exception e) {
      throw new IndexShardGatewaySnapshotFailedException(
          shardId, "Failed to snapshot translog", e);
    }
  }
  currentSnapshotStatus
      .translog()
      .time(System.currentTimeMillis() - currentSnapshotStatus.translog().startTime());

  // now create and write the commit point
  currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FINALIZE);
  long version = 0;
  if (!commitPoints.commits().isEmpty()) {
    version = commitPoints.commits().iterator().next().version() + 1;
  }
  String commitPointName = "commit-" + Long.toString(version, Character.MAX_RADIX);
  CommitPoint commitPoint =
      new CommitPoint(
          version,
          commitPointName,
          CommitPoint.Type.GENERATED,
          indexCommitPointFiles,
          translogCommitPointFiles);
  try {
    byte[] commitPointData = CommitPoints.toXContent(commitPoint);
    blobContainer.writeBlob(
        commitPointName, new FastByteArrayInputStream(commitPointData), commitPointData.length);
  } catch (Exception e) {
    throw new IndexShardGatewaySnapshotFailedException(
        shardId, "Failed to write commit point", e);
  }

  // delete all files that are not referenced by any commit point
  // build a new CommitPoint, that includes this one and all the saved ones
  List<CommitPoint> newCommitPointsList = Lists.newArrayList();
  newCommitPointsList.add(commitPoint);
  for (CommitPoint point : commitPoints) {
    if (point.type() == CommitPoint.Type.SAVED) {
      newCommitPointsList.add(point);
    }
  }
  CommitPoints newCommitPoints = new CommitPoints(newCommitPointsList);
  // first, delete all commit-point blobs that are not referenced by the new commit points
  for (String blobName : blobs.keySet()) {
    if (!blobName.startsWith("commit-")) {
      continue;
    }
    long checkedVersion =
        Long.parseLong(blobName.substring("commit-".length()), Character.MAX_RADIX);
    if (!newCommitPoints.hasVersion(checkedVersion)) {
      try {
        blobContainer.deleteBlob(blobName);
      } catch (IOException e) {
        // ignore
      }
    }
  }
  // now go over all the blobs, and if they don't exist in a commit point, delete them
  for (String blobName : blobs.keySet()) {
    String name = blobName;
    if (!name.startsWith("__")) {
      continue;
    }
    if (blobName.contains(".part")) {
      name = blobName.substring(0, blobName.indexOf(".part"));
    }
    if (newCommitPoints.findNameFile(name) == null) {
      try {
        blobContainer.deleteBlob(blobName);
      } catch (IOException e) {
        // ignore, will delete it later
      }
    }
  }
}
private void recoverIndex(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs)
    throws Exception {
  int numberOfFiles = 0;
  long totalSize = 0;
  int numberOfReusedFiles = 0;
  long reusedTotalSize = 0;

  List<CommitPoint.FileInfo> filesToRecover = Lists.newArrayList();
  for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
    String fileName = fileInfo.physicalName();
    StoreFileMetaData md = null;
    try {
      md = store.metaData(fileName);
    } catch (Exception e) {
      // no file
    }
    // we don't compute checksum for segments, so always recover them
    if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
      numberOfFiles++;
      totalSize += md.length();
      numberOfReusedFiles++;
      reusedTotalSize += md.length();
      if (logger.isTraceEnabled()) {
        logger.trace(
            "not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
      }
    } else {
      if (logger.isTraceEnabled()) {
        if (md == null) {
          logger.trace(
              "recovering [{}], does not exists in local store", fileInfo.physicalName());
        } else {
          logger.trace(
              "recovering [{}], exists in local store but is different", fileInfo.physicalName());
        }
      }
      numberOfFiles++;
      totalSize += fileInfo.length();
      filesToRecover.add(fileInfo);
    }
  }

  recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
  if (filesToRecover.isEmpty()) {
    logger.trace("no files to recover, all exists within the local store");
  }

  if (logger.isTraceEnabled()) {
    logger.trace(
        "recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]",
        numberOfFiles,
        new ByteSizeValue(totalSize),
        numberOfReusedFiles,
        new ByteSizeValue(reusedTotalSize));
  }

  final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
  final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();

  for (final CommitPoint.FileInfo fileToRecover : filesToRecover) {
    recoverFile(fileToRecover, blobs, latch, failures);
  }

  try {
    latch.await();
  } catch (InterruptedException e) {
    throw new IndexShardGatewayRecoveryException(
        shardId, "Interrupted while recovering index", e);
  }

  if (!failures.isEmpty()) {
    throw new IndexShardGatewayRecoveryException(
        shardId, "Failed to recover index", failures.get(0));
  }

  // read the gateway data persisted
  long version = -1;
  try {
    if (IndexReader.indexExists(store.directory())) {
      version = IndexReader.getCurrentVersion(store.directory());
    }
  } catch (IOException e) {
    throw new IndexShardGatewayRecoveryException(
        shardId(), "Failed to fetch index version after copying it over", e);
  }
  recoveryStatus.index().updateVersion(version);

  // now, go over and clean files that are in the store, but were not in the gateway
  try {
    for (String storeFile : store.directory().listAll()) {
      if (!commitPoint.containPhysicalIndexFile(storeFile)) {
        try {
          store.directory().deleteFile(storeFile);
        } catch (Exception e) {
          // ignore
        }
      }
    }
  } catch (Exception e) {
    // ignore
  }
}
/** Notifies the registered {@link AccessibilityStateChangeListener}s. */
private void notifyAccessibilityStateChanged() {
  final int listenerCount = mAccessibilityStateChangeListeners.size();
  for (int i = 0; i < listenerCount; i++) {
    mAccessibilityStateChangeListeners.get(i).onAccessibilityStateChanged(mIsEnabled);
  }
}
@Override
public Overlay get(final int pIndex) {
  return mOverlayList.get(pIndex);
}
/** @return One Group to which this channel belongs */
public final ArchiveGroup getGroup(final int index) {
  return groups.get(index);
}
/**
 * Gets the processor at the specified position.
 *
 * @param index The position to get the processor from.
 * @return The processor.
 */
public synchronized IAudioProcessor get(int index) {
  return processors.get(index);
}
@Override
public void onNewShieldFrameReceived(ShieldFrame frame) {
  if (frame.getShieldId() == UIShield.TERMINAL_SHIELD.getId()) {
    String outputTxt = null;
    if (frame.getArguments() != null && frame.getArguments().size() > 0)
      outputTxt = frame.getArgumentAsString(0);
    if (outputTxt != null) {
      String date =
          terminalPrintedLines.size() == 0
                  || terminalPrintedLines.get(terminalPrintedLines.size() - 1).isEndedWithNewLine
              ? TerminalFragment.getTimeAsString() + " [RX] : "
              : "";
      boolean isEndedWithNewLine =
          outputTxt.length() > 0 && outputTxt.charAt(outputTxt.length() - 1) == '\n';
      if (lastItemEndedWithNewLine) {
        terminalPrintedLines.add(
            new TerminalPrintedLine(
                date,
                outputTxt.substring(
                    0, isEndedWithNewLine ? outputTxt.length() - 1 : outputTxt.length()),
                isEndedWithNewLine,
                true));
        tempLines.add(
            new TerminalPrintedLine(
                date,
                getEncodedString(
                    outputTxt.substring(
                        0, isEndedWithNewLine ? outputTxt.length() - 1 : outputTxt.length())),
                isEndedWithNewLine,
                true));
      } else if (terminalPrintedLines.size() > 0 && tempLines.size() > 0) {
        terminalPrintedLines.get(terminalPrintedLines.size() - 1).print =
            terminalPrintedLines.get(terminalPrintedLines.size() - 1).print
                + outputTxt.substring(
                    0, isEndedWithNewLine ? outputTxt.length() - 1 : outputTxt.length());
        tempLines.get(tempLines.size() - 1).print =
            getEncodedString(terminalPrintedLines.get(terminalPrintedLines.size() - 1).print);
        if (isEndedWithNewLine)
          terminalPrintedLines.get(terminalPrintedLines.size() - 1).isEndedWithNewLine = true;
      }
      lastItemEndedWithNewLine = isEndedWithNewLine;
      greaterThanThousand = terminalPrintedLines.size() > 1000;
      if (greaterThanThousand) {
        // for (int i = 0; i < 1; i++) {
        terminalPrintedLines.remove(0);
        tempLines.remove(0);
        // }
      }
      switch (frame.getFunctionId()) {
        case WRITE:
          if (eventHandler != null) {
            eventHandler.onPrint(outputTxt, greaterThanThousand);
          }
          break;
        case PRINT:
          if (eventHandler != null) {
            eventHandler.onPrint(outputTxt, greaterThanThousand);
          }
          break;
        default:
          break;
      }
    }
    // Log.d("internetLog", "Terminal " + outputTxt);
  }
}
@Override
public synchronized Layer get(int index) {
  return mLayerList.get(index);
}