/**
 * Called when the PeerCoordinator got the MetaInfo via magnet. CoordinatorListener. Create the
 * storage, tell SnarkManager, and give the storage back to the coordinator.
 *
 * @param coordinator the coordinator that received the metainfo; given the new storage on success
 * @param metainfo the torrent metainfo just obtained via magnet
 * @throws RuntimeException via fatal()
 * @since 0.8.4
 */
public void gotMetaInfo(PeerCoordinator coordinator, MetaInfo metainfo) {
  try {
    // Sanitize the torrent name before using it as a filesystem path.
    String base = Storage.filterName(metainfo.getName());
    File baseFile;
    // SecureFile restricts permissions when files should not be world-readable.
    if (_util.getFilesPublic()) baseFile = new File(rootDataDir, base);
    else baseFile = new SecureFile(rootDataDir, base);
    // The following two may throw IOE...
    storage = new Storage(_util, baseFile, metainfo, this, false);
    storage.check();
    // ... so don't set meta until here
    meta = metainfo;
    if (completeListener != null) {
      // Listener (SnarkManager) may rename the torrent; adopt the new name.
      String newName = completeListener.gotMetaInfo(this);
      if (newName != null) torrent = newName;
      // else some horrible problem
    }
    coordinator.setStorage(storage);
  } catch (IOException ioe) {
    // Best-effort cleanup of the partially created storage before aborting.
    if (storage != null) {
      try {
        storage.close();
      } catch (IOException ioee) {
      }
    }
    fatal("Could not check or create storage", ioe);
  }
}
/**
 * {@inheritDoc}
 *
 * <p>Removes the AuthzGroup with this id, posts a removal event, closes the edit, and clears any
 * site security derived from it. A null or unknown id is silently ignored.
 *
 * @param azGroupId id of the realm to remove; may be null.
 * @throws AuthzPermissionException if the current user lacks SECURE_REMOVE_AUTHZ_GROUP.
 */
public void removeAuthzGroup(String azGroupId) throws AuthzPermissionException {
  if (azGroupId == null) return;
  // check for existence - nothing to do if not in storage
  AuthzGroup azGroup = m_storage.get(azGroupId);
  if (azGroup == null) {
    return;
  }
  // check security (throws if not permitted)
  unlock(SECURE_REMOVE_AUTHZ_GROUP, authzGroupReference(azGroupId));
  // complete the azGroup
  m_storage.remove(azGroup);
  // track it
  eventTrackingService()
      .post(
          eventTrackingService()
              .newEvent(SECURE_REMOVE_AUTHZ_GROUP, azGroup.getReference(), true));
  // close the azGroup object
  ((BaseAuthzGroup) azGroup).closeEdit();
  // clear any site security based on this (if a site) azGroup
  removeSiteSecurity(azGroup);
}
/**
 * {@inheritDoc}
 *
 * <p>Opens (or creates, if absent) the digest with this id for editing, holding a lock from the
 * info store. Synchronized on m_storage so only one add/edit runs at a time.
 *
 * @param id the digest id to edit.
 * @return the locked DigestEdit.
 * @throws InUseException if the digest is already locked by another editor.
 */
public DigestEdit edit(String id) throws InUseException {
  // security
  // unlock(SECURE_EDIT_DIGEST, digestReference(id));

  // one add/edit at a time, please, to make sync. only one digest per user
  // TODO: I don't link sync... could just do the add and let it fail if it already exists
  // -ggolden
  synchronized (m_storage) {
    // check for existence - create on the fly if missing
    if (!m_storage.check(id)) {
      try {
        return add(id);
      } catch (IdUsedException e) {
        // raced with another creator; fall through and edit the existing one
        M_log.warn(".edit: from the add: " + e);
      }
    }
    // ignore the cache - get the user with a lock from the info store
    DigestEdit edit = m_storage.edit(id);
    if (edit == null) throw new InUseException(id);
    ((BaseDigest) edit).setEvent(SECURE_EDIT_DIGEST);
    return edit;
  }
}
/**
 * Rebuilds the status text area: store size/total, one line per worker thread, then the pending
 * job list, truncated with a "[ n more ]" summary when it would overflow the visible height.
 */
public void updateStatus() {
  txaStatus.setText("");
  txaStatus.append("size\t" + store.getRowCount() + "\n");
  txaStatus.append("total\t" + store.sumLengths() + "KB\n");
  txaStatus.append("\nThreads:\n");
  for (int i = 0; i < numThreads; i++) {
    txaStatus.append(" " + i + "- ");
    String jb = encryptDecryptThreads[i].getCur();
    if (jb == null) txaStatus.append("idle\n");
    else {
      txaStatus.append(jobString(jb) + "\n");
    }
  }
  txaStatus.append("\nJobs:\n");
  // Lines consumed so far: 6 fixed lines plus one per thread.
  int c = 6 + numThreads;
  int i = 0;
  synchronized (jobs) {
    for (String s : jobs) {
      if (c + i < TXA_HEIGHT - 1) txaStatus.append(" - " + jobString(s) + "\n");
      else if (c + i == TXA_HEIGHT - 1) {
        // Out of room: summarize the remainder and stop - nothing after this
        // point is ever appended, so iterating further was wasted work.
        // (Fix: the summary previously rendered as "[3more ]" - missing space.)
        txaStatus.append(" - [" + (jobs.size() - i) + " more ]");
        break;
      }
      i++;
    }
  }
}
/**
 * Exports every store to per-store directories under ./export: writes a library log (name, date,
 * tags per entry) and queues a decrypt job for each entry. Shows a dialog on completion or failure.
 */
public void totalExport() {
  File expf = new File("export");
  if (expf.exists()) rmrf(expf);
  expf.mkdirs();
  for (int sto = 0; sto < storeLocs.size(); sto++) {
    try {
      // Flatten the store's absolute path into a single directory name.
      String sl =
          storeLocs.get(sto).getAbsolutePath().replaceAll("/", "-").replaceAll("\\\\", "-");
      File estore = new File(expf, sl);
      estore.mkdir();
      File log = new File(estore, LIBRARY_NAME);
      // try-with-resources: previously the PrintWriter leaked when
      // prepareMainFile (or any later call) threw an IOException mid-loop.
      try (PrintWriter pw = new PrintWriter(log)) {
        for (int i = 0; i < store.getRowCount(); i++)
          if (store.curStore(i) == sto) {
            File enc = store.locate(i);
            File dec = sec.prepareMainFile(enc, estore, false);
            pw.println(dec.getName());
            pw.println(store.getValueAt(i, Storage.COL_DATE));
            pw.println(store.getValueAt(i, Storage.COL_TAGS));
            synchronized (jobs) {
              jobs.addLast(expJob(enc, dec));
            }
          }
      }
    } catch (IOException exc) {
      exc.printStackTrace();
      JOptionPane.showMessageDialog(frm, "Exporting Failed");
      return;
    }
  }
  JOptionPane.showMessageDialog(frm, "Exporting to:\n " + expf.getAbsolutePath());
}
/** Updating one slot of the epoch vector must leave the other slots untouched. */
@Test
public void shouldUpdateSingleEpoch() {
  long[] initial = {1, 2, 3};
  storage.setEpoch(initial);
  // Replace only index 1 with the value 10.
  storage.updateEpoch(10, 1);
  long[] expected = {1, 10, 3};
  assertArrayEquals(expected, storage.getEpoch());
}
/**
 * Starts (or restarts) this torrent: connects to I2P, creates the PeerCoordinator, acceptor and
 * TrackerClient on first start, and restarts halted components on subsequent starts.
 * Calls fatal() (which throws) on connection/listen/storage failures.
 */
private void x_startTorrent() {
  boolean ok = _util.connect();
  if (!ok) fatal("Unable to connect to I2P");
  if (coordinator == null) {
    // First start: wire up the whole peer/tracker machinery.
    I2PServerSocket serversocket = _util.getServerSocket();
    if (serversocket == null) fatal("Unable to listen for I2P connections");
    else {
      Destination d = serversocket.getManager().getSession().getMyDestination();
      if (_log.shouldLog(Log.INFO))
        _log.info(
            "Listening on I2P destination "
                + d.toBase64()
                + " / "
                + d.calculateHash().toBase64());
    }
    if (_log.shouldLog(Log.INFO))
      _log.info("Starting PeerCoordinator, ConnectionAcceptor, and TrackerClient");
    activity = "Collecting pieces";
    coordinator = new PeerCoordinator(_util, id, infoHash, meta, storage, this, this);
    // Restore the upload counter saved from the previous run.
    coordinator.setUploaded(savedUploaded);
    if (_peerCoordinatorSet != null) {
      // multitorrent
      _peerCoordinatorSet.add(coordinator);
    } else {
      // single torrent
      acceptor = new ConnectionAcceptor(_util, new PeerAcceptor(coordinator));
    }
    // TODO pass saved closest DHT nodes to the tracker? or direct to the coordinator?
    trackerclient = new TrackerClient(_util, meta, additionalTrackerURL, coordinator, this);
  }
  // ensure acceptor is running when in multitorrent
  if (_peerCoordinatorSet != null && acceptor != null) {
    acceptor.startAccepting();
  }
  stopped = false;
  if (coordinator.halted()) {
    // Restart path: the coordinator was stopped earlier; re-register it.
    coordinator.restart();
    if (_peerCoordinatorSet != null) _peerCoordinatorSet.add(coordinator);
  }
  if (!trackerclient.started()) {
    trackerclient.start();
  } else if (trackerclient.halted()) {
    // Restart path: storage was closed on stop, so reopen before announcing.
    if (storage != null) {
      try {
        storage.reopen();
      } catch (IOException ioe) {
        try {
          storage.close();
        } catch (IOException ioee) {
          ioee.printStackTrace();
        }
        fatal("Could not reopen storage", ioe);
      }
    }
    trackerclient.start();
  } else {
    // Already started and not halted - unexpected state, log and leave it be.
    if (_log.shouldLog(Log.INFO)) _log.info("NOT starting TrackerClient???");
  }
}
/**
 * Removes this peer's responsibility for a location key. Unless {@code keepData} is set, first
 * removes (and releases) all data stored under that location key across the full domain/content
 * key range. The responsibility lock is held for the whole operation; the data lock only around
 * the removal.
 *
 * @param locationKey the location whose responsibility is dropped.
 * @param keepData if true, stored data under this location is left in place.
 */
public void removeResponsibility(Number160 locationKey, boolean keepData) {
  RangeLock<Number640>.Range lockResp = lockResponsibility(locationKey);
  try {
    if (!keepData) {
      RangeLock<Number640>.Range lock = lock(locationKey);
      try {
        // Remove everything from (location, 0, 0, 0) to (location, MAX, MAX, MAX).
        final NavigableMap<Number640, Data> removed =
            backend.remove(
                new Number640(locationKey, Number160.ZERO, Number160.ZERO, Number160.ZERO),
                new Number640(
                    locationKey, Number160.MAX_VALUE, Number160.MAX_VALUE, Number160.MAX_VALUE));
        for (Map.Entry<Number640, Data> entry : removed.entrySet()) {
          // Release each removed Data's resources and drop its timeout entry.
          if (entry.getValue() != null) {
            entry.getValue().release();
          }
          backend.removeTimeout(entry.getKey());
        }
      } finally {
        lock.unlock();
      }
    }
    backend.removeResponsibility(locationKey);
  } finally {
    lockResp.unlock();
  }
}
/**
 * Confirms a previously prepared put: after the security check passes, clears the prepare flag on
 * the stored entry and refreshes its validity window and TTL from {@code newData}, re-registering
 * the timeout. {@code newData} itself is only a carrier and is always released in the finally
 * block; the stored entry is kept (not released) since it stays in the backend.
 *
 * @param publicKey key of the caller, checked against the entry's protection.
 * @param key the full key of the entry to confirm.
 * @param newData carries the new validFrom/ttl values; released before return.
 * @return PutStatus.OK, FAILED_SECURITY, or NOT_FOUND.
 */
public Enum<?> putConfirm(PublicKey publicKey, Number640 key, Data newData) {
  RangeLock<Number640>.Range lock = lock(key);
  try {
    if (!securityEntryCheck(
        key.locationAndDomainAndContentKey(),
        publicKey,
        newData.publicKey(),
        newData.isProtectedEntry())) {
      return PutStatus.FAILED_SECURITY;
    }

    final Data data = backend.get(key);
    if (data != null) {
      // remove prepare flag
      data.prepareFlag(false);
      data.validFromMillis(newData.validFromMillis());
      data.ttlSeconds(newData.ttlSeconds());
      long expiration = data.expirationMillis();
      // handle timeout
      backend.addTimeout(key, expiration);
      backend.put(key, data);
      // don't release data as we just update
      return PutStatus.OK;
    } else {
      return PutStatus.NOT_FOUND;
    }
  } finally {
    newData.release();
    lock.unlock();
  }
  // TODO: check for FORKS!
}
/**
 * Removes all entries in [from, to] that this public key may claim and returns them. Keys the
 * caller cannot claim (domain or entry protection) are reported with a null value instead of
 * being removed. Protected entries owned by a different key are silently skipped.
 *
 * @param from lower bound of the range (inclusive).
 * @param to upper bound of the range (inclusive).
 * @param publicKey the caller's key used for the claim checks; may be null.
 * @return map of key -> removed data, or key -> null where removal was denied.
 */
public NavigableMap<Number640, Data> removeReturnData(
    Number640 from, Number640 to, PublicKey publicKey) {
  RangeLock<Number640>.Range lock = rangeLock.lock(from, to);
  try {
    Map<Number640, Data> tmp = backend.subMap(from, to);
    NavigableMap<Number640, Data> result = new TreeMap<Number640, Data>();
    for (Number640 key : tmp.keySet()) {
      // fail fast, as soon as we want to remove 1 domain that we
      // cannot, abort
      if (!canClaimDomain(key.locationAndDomainKey(), publicKey)) {
        // denied at domain level - report with null, don't remove
        result.put(key, null);
      } else if (!canClaimEntry(key.locationAndDomainAndContentKey(), publicKey)) {
        // denied at entry level - report with null, don't remove
        result.put(key, null);
      } else {
        Data toRemove = backend.get(key);
        // Only remove unprotected entries or entries owned by this key.
        if (toRemove != null
            && (toRemove.publicKey() == null || toRemove.publicKey().equals(publicKey))) {
          backend.removeTimeout(key);
          Data removed = backend.remove(key, true);
          result.put(key, removed);
        }
      }
    }
    return result;
  } finally {
    lock.unlock();
  }
}
/**
 * Evicts all entries whose timeout has passed: removes each expired entry (releasing its Data),
 * drops its timeout record, and - if nothing else is stored under the same location key - also
 * drops this peer's responsibility for that location. Each key is handled under its own data
 * lock, with the responsibility lock nested inside.
 */
public void checkTimeout() {
  long time = System.currentTimeMillis();
  Collection<Number640> toRemove = backend.subMapTimeout(time);
  for (Number640 key : toRemove) {
    RangeLock<Number640>.Range lock = lock(key);
    try {
      Data oldData = backend.remove(key, false);
      if (oldData != null) {
        oldData.release();
      }
      backend.removeTimeout(key);

      // remove responsibility if we don't have any data stored under
      // locationkey
      Number160 locationKey = key.locationKey();
      RangeLock<Number640>.Range lockResp = lockResponsibility(locationKey);
      try {
        if (isEmpty(locationKey)) {
          backend.removeResponsibility(locationKey);
        }
      } finally {
        lockResp.unlock();
      }
    } finally {
      lock.unlock();
    }
  }
}
/**
 * Verifies that read and write channels survive capture/restore: a write started on one channel
 * is continued by a restored channel (the original writer is intentionally abandoned, not
 * closed), and likewise a read in progress is continued by a restored reader.
 */
@Test
public void testReadAndWriteCaptureChannels() throws IOException {
  String blobName = "test-read-and-write-capture-channels-blob";
  BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build();
  byte[] stringBytes;
  BlobWriteChannel writer = storage.writer(blob);
  stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8);
  // Write the first chunk, then capture the channel state mid-write.
  writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT));
  RestorableState<BlobWriteChannel> writerState = writer.capture();
  // The restored channel continues and finishes the upload.
  BlobWriteChannel secondWriter = writerState.restore();
  secondWriter.write(ByteBuffer.wrap(stringBytes));
  secondWriter.close();
  ByteBuffer readBytes;
  ByteBuffer readStringBytes;
  BlobReadChannel reader = storage.reader(blob.blobId());
  reader.chunkSize(BLOB_BYTE_CONTENT.length);
  readBytes = ByteBuffer.allocate(BLOB_BYTE_CONTENT.length);
  // Read the first chunk, capture, then continue from a restored reader.
  reader.read(readBytes);
  RestorableState<BlobReadChannel> readerState = reader.capture();
  BlobReadChannel secondReader = readerState.restore();
  readStringBytes = ByteBuffer.allocate(stringBytes.length);
  secondReader.read(readStringBytes);
  reader.close();
  secondReader.close();
  // Each half of the blob must match what the corresponding writer wrote.
  assertArrayEquals(BLOB_BYTE_CONTENT, readBytes.array());
  assertEquals(BLOB_STRING_CONTENT, new String(readStringBytes.array(), UTF_8));
  assertTrue(storage.delete(BUCKET, blobName));
}
/**
 * {@inheritDoc}
 *
 * <p>Saves the AuthzGroup, creating it in storage first when it is new. The explicit security
 * check is skipped when the user may already update the corresponding site's membership.
 *
 * @param azGroup the realm to save.
 * @throws GroupNotDefinedException if the id is null, or the group is neither new nor in storage.
 * @throws AuthzPermissionException if the current user lacks SECURE_UPDATE_AUTHZ_GROUP.
 */
public void save(AuthzGroup azGroup) throws GroupNotDefinedException, AuthzPermissionException {
  if (azGroup.getId() == null) throw new GroupNotDefinedException("<null>");
  Reference ref = entityManager().newReference(azGroup.getId());
  if (!SiteService.allowUpdateSiteMembership(ref.getId())) {
    // check security (throws if not permitted)
    unlock(SECURE_UPDATE_AUTHZ_GROUP, authzGroupReference(azGroup.getId()));
  }

  // make sure it's in storage
  if (!m_storage.check(azGroup.getId())) {
    // if this was new, create it in storage
    if (((BaseAuthzGroup) azGroup).m_isNew) {
      // reserve an AuthzGroup with this id from the info store - if it's in use, this will return
      // null
      AuthzGroup newAzg = m_storage.put(azGroup.getId());
      if (newAzg == null) {
        M_log.warn("saveUsingSecurity, storage.put for a new returns null");
      }
    } else {
      throw new GroupNotDefinedException(azGroup.getId());
    }
  }

  // complete the save
  completeSave(azGroup);
}
@Test public void testComposeBlobFail() { String sourceBlobName1 = "test-compose-blob-fail-source-1"; String sourceBlobName2 = "test-compose-blob-fail-source-2"; BlobInfo sourceBlob1 = BlobInfo.builder(BUCKET, sourceBlobName1).build(); BlobInfo sourceBlob2 = BlobInfo.builder(BUCKET, sourceBlobName2).build(); assertNotNull(storage.create(sourceBlob1)); assertNotNull(storage.create(sourceBlob2)); String targetBlobName = "test-compose-blob-fail-target"; BlobInfo targetBlob = BlobInfo.builder(BUCKET, targetBlobName).build(); Storage.ComposeRequest req = Storage.ComposeRequest.builder() .addSource(sourceBlobName1, -1L) .addSource(sourceBlobName2, -1L) .target(targetBlob) .build(); try { storage.compose(req); fail("StorageException was expected"); } catch (StorageException ex) { // expected } assertTrue(storage.delete(BUCKET, sourceBlobName1)); assertTrue(storage.delete(BUCKET, sourceBlobName2)); }
/**
 * Add member to a group, once id and security checks have been cleared.
 *
 * @param azGroup the group to add the member to.
 * @param userId id of the user being added.
 * @param roleId role the user receives in the group.
 * @param maxSize maximum group size passed through to storage.
 * @throws GroupFullException if adding would exceed maxSize.
 */
protected void addMemberToGroup(AuthzGroup azGroup, String userId, String roleId, int maxSize)
    throws GroupFullException {
  // update the properties (sets last modified time and modified-by)
  addLiveUpdateProperties((BaseAuthzGroup) azGroup);

  // add user to the azGroup
  m_storage.addNewUser(azGroup, userId, roleId, maxSize);

  // track it
  String event = ((BaseAuthzGroup) azGroup).getEvent();
  if (event == null) event = SECURE_UPDATE_AUTHZ_GROUP;
  eventTrackingService()
      .post(eventTrackingService().newEvent(event, azGroup.getReference(), true));

  // close the azGroup object
  ((BaseAuthzGroup) azGroup).closeEdit();

  // update the db with latest provider, and site security with the latest changes, using the
  // updated azGroup
  BaseAuthzGroup updatedRealm = (BaseAuthzGroup) m_storage.get(azGroup.getId());
  updateSiteSecurity(updatedRealm);

  // clear the event for next time
  ((BaseAuthzGroup) azGroup).setEvent(null);
}
/**
 * Listing with a restricted field set must populate only the requested fields (metadata here);
 * unrequested fields such as contentType must come back null.
 */
@Test
public void testListBlobsSelectedFields() {
  String[] blobNames = {
    "test-list-blobs-selected-fields-blob1", "test-list-blobs-selected-fields-blob2"
  };
  // Fix: was a raw ImmutableMap - parameterize to avoid unchecked conversions.
  ImmutableMap<String, String> metadata = ImmutableMap.of("k", "v");
  BlobInfo blob1 =
      BlobInfo.builder(BUCKET, blobNames[0]).contentType(CONTENT_TYPE).metadata(metadata).build();
  BlobInfo blob2 =
      BlobInfo.builder(BUCKET, blobNames[1]).contentType(CONTENT_TYPE).metadata(metadata).build();
  assertNotNull(storage.create(blob1));
  assertNotNull(storage.create(blob2));
  Page<BlobInfo> page =
      storage.list(
          BUCKET,
          Storage.BlobListOption.prefix("test-list-blobs-selected-fields-blob"),
          Storage.BlobListOption.fields(BlobField.METADATA));
  int index = 0;
  for (BlobInfo remoteBlob : page.values()) {
    assertEquals(BUCKET, remoteBlob.bucket());
    assertEquals(blobNames[index++], remoteBlob.name());
    assertEquals(metadata, remoteBlob.metadata());
    // contentType was not in the field mask, so it must be absent.
    assertNull(remoteBlob.contentType());
  }
  assertTrue(storage.delete(BUCKET, blobNames[0]));
  assertTrue(storage.delete(BUCKET, blobNames[1]));
}
/**
 * Deletes the currently selected table item after a confirmation dialog: removes it from the
 * store model, deletes the backing file, and refreshes the status display. Shows an error dialog
 * when nothing is selected.
 */
public void secureDelete() {
  int rw = tblItems.getSelectedRow();
  if (rw == -1) {
    JOptionPane.showMessageDialog(frm, "No item selected", "Error", JOptionPane.ERROR_MESSAGE);
    return;
  }
  // Map the view row (may be sorted/filtered) back to the model index.
  int idx = tblItems.convertRowIndexToModel(rw);
  if (JOptionPane.showConfirmDialog(
          frm,
          "Delete " + store.plainName(idx) + "?",
          "Confirm Delete",
          JOptionPane.YES_NO_OPTION)
      != JOptionPane.YES_OPTION) return;
  File del = store.delete(idx);
  store.fireTableDataChanged();
  if (del != null) {
    if (del.delete()) {
      // successful
      needsSave = true;
    } else {
      System.err.println("Delete " + del.getAbsolutePath() + " failed");
    }
  }
  updateStatus();
}
/**
 * Allocates a fresh, cleared node at the current end of the storage, records its offset as the
 * node's storage pointer, and writes its serialized form there.
 *
 * @param storage the backing storage to append to.
 * @return the newly allocated node, positioned at the old end of storage.
 */
public static Node allocateNewNode(Storage storage) {
  Node node = new Node(storage);
  node.clear();
  // Read the end-of-storage offset once: calling storage.length() twice risks
  // the pointer and the seek target disagreeing if anything moves the end.
  long end = storage.length();
  node.setStoragePointer(end);
  storage.seek(end);
  storage.write(node.toBytes());
  return node;
}
/**
 * Ends the session: closes and drops the connector and document manager, and empties the shared
 * Storage project-name and user lists.
 */
public void end() {
  connector.close();
  documentManager.close();
  connector = null;
  documentManager = null;
  // clear() instead of removeAll(list-itself): removing a collection from
  // itself is fragile (membership tests race the in-place compaction) and
  // O(n^2); clear() states the intent directly.
  Storage.getInstance().getProjectNames().clear();
  Storage.getInstance().getUsers().clear();
}
/** Debug shortcut: fills the Storage singleton with a fixed user and jumps to the logged-in UI. */
public void onDebugClick(View view) {
  Storage session = Storage.getInstance();
  session.userID = 1;
  session.firstname = "Nathaniel";
  session.lastname = "Woodthorpe";
  session.loginTime = System.currentTimeMillis();
  session.email = "*****@*****.**";
  loggedIn();
}
/**
 * getAcceptors() must return a defensive copy: clearing the returned set may not disturb the
 * storage's internal acceptor set, which is expected to still report bits 0..2 afterwards
 * (presumably the fixture configures 3 acceptors - verify against the test setup).
 */
@Test
public void shouldCopyAcceptorsBeforeReturn() {
  // Mutate the set handed out by storage...
  storage.getAcceptors().clear();
  BitSet acceptors = new BitSet();
  acceptors.set(0, 3);
  // ...and verify the storage still reports its original acceptors.
  assertEquals(acceptors, storage.getAcceptors());
}
/** Setting a strictly higher view number must succeed and be readable back. */
@Test
public void shouldSetHigherView() {
  for (int view : new int[] {5, 9}) {
    storage.setView(view);
    assertEquals(view, storage.getView());
  }
}
/**
 * After installing a snapshot whose next instance id is 5, updateFirstUncommitted() must advance
 * firstUncommitted to 5. (Note: "Uncommited" in the method name is a pre-existing typo, kept to
 * avoid changing the test's public name.)
 */
@Test
public void shouldUpdateFirstUncommitedWithSnapshot() {
  Snapshot snapshot = new Snapshot();
  snapshot.setNextInstanceId(5);
  storage.setLastSnapshot(snapshot);
  storage.updateFirstUncommitted();
  assertEquals(5, storage.getFirstUncommitted());
}
/** updateEpoch with an out-of-range index must throw IllegalArgumentException. */
@Test
public void shouldThrowExceptionForInvalidEpoch() {
  storage.setEpoch(new long[] {1, 2, 3});
  try {
    // Index 3 is out of range for a 3-entry epoch vector.
    storage.updateEpoch(5, 3);
    fail();
  } catch (IllegalArgumentException expected) {
    // thrown as required
  }
}
/**
 * Round-trips {@code size} generated bytes through the provider's storage and verifies the
 * content read back matches what was stored.
 *
 * @param provider the storage provider under test.
 * @param size number of bytes to generate and store.
 * @throws IOException on storage or copy failure.
 */
private void testStore(StorageProvider provider, int size) throws IOException {
  byte[] data = createData(size);
  Assert.assertEquals(size, data.length);
  Storage storage = provider.store(new ByteArrayInputStream(data));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // Close the storage's stream deterministically; it was previously never
  // closed (ContentUtil.copy is not shown to close its source - verify).
  try (java.io.InputStream in = storage.getInputStream()) {
    ContentUtil.copy(in, baos);
  }
  verifyData(data, baos.toByteArray());
}
/** Fetching with an empty field mask must return the id but no other fields. */
@Test
public void testGetBlobEmptySelectedFields() {
  String blobName = "test-get-empty-selected-fields-blob";
  BlobInfo blob = BlobInfo.builder(BUCKET, blobName).contentType(CONTENT_TYPE).build();
  assertNotNull(storage.create(blob));
  // Request no fields at all; only the id should be populated.
  BlobInfo fetched = storage.get(blob.blobId(), Storage.BlobGetOption.fields());
  assertEquals(blob.blobId(), fetched.blobId());
  assertNull(fetched.contentType());
  assertTrue(storage.delete(BUCKET, blobName));
}
/** Creating a blob with no content must yield an empty, readable blob. */
@Test
public void testCreateEmptyBlob() {
  String blobName = "test-create-empty-blob";
  BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build();
  BlobInfo created = storage.create(blob);
  assertNotNull(created);
  assertEquals(blob.blobId(), created.blobId());
  // Reading the blob back must produce zero bytes.
  byte[] content = storage.readAllBytes(BUCKET, blobName);
  assertArrayEquals(new byte[0], content);
  assertTrue(storage.delete(BUCKET, blobName));
}
/** Updating a blob's metadata must preserve its id and apply the new content type. */
@Test
public void testUpdateBlob() {
  String blobName = "test-update-blob";
  BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build();
  assertNotNull(storage.create(blob));
  // Update only the content type on the existing blob.
  BlobInfo afterUpdate = storage.update(blob.toBuilder().contentType(CONTENT_TYPE).build());
  assertNotNull(afterUpdate);
  assertEquals(blob.blobId(), afterUpdate.blobId());
  assertEquals(CONTENT_TYPE, afterUpdate.contentType());
  assertTrue(storage.delete(BUCKET, blobName));
}
/**
 * Batch delete with one existing and one missing blob: the existing one reports true, the
 * missing one reports false.
 */
@Test
public void testDeleteBlobsFail() {
  String sourceBlobName1 = "test-delete-blobs-fail-1";
  String sourceBlobName2 = "test-delete-blobs-fail-2";
  BlobInfo sourceBlob1 = BlobInfo.builder(BUCKET, sourceBlobName1).build();
  BlobInfo sourceBlob2 = BlobInfo.builder(BUCKET, sourceBlobName2).build();
  // Only the first blob is created; the second is intentionally missing.
  assertNotNull(storage.create(sourceBlob1));
  List<Boolean> deleteStatus = storage.delete(sourceBlob1.blobId(), sourceBlob2.blobId());
  assertTrue(deleteStatus.get(0));
  // Fix: assertFalse instead of assertTrue(!...) - clearer intent and failure message.
  assertFalse(deleteStatus.get(1));
}
/** Re-setting the current view number must be rejected with IllegalArgumentException. */
@Test
public void shouldNotSetEqualView() {
  storage.setView(5);
  assertEquals(5, storage.getView());
  try {
    // Same view number again - must not be accepted.
    storage.setView(5);
    fail();
  } catch (IllegalArgumentException expected) {
    // thrown as required
  }
}