@Override public void initStore(final Store<?, ?> resource) { StoreConfig storeConfig = createdStores.get(resource); if (storeConfig == null) { throw new IllegalArgumentException( "Given store is not managed by this provider : " + resource); } final ClusteredStore clusteredStore = (ClusteredStore) resource; try { clusteredStore.storeProxy = clusteringService.getServerStoreProxy( storeConfig.getCacheIdentifier(), storeConfig.getStoreConfig(), storeConfig.getConsistency()); } catch (CachePersistenceException e) { throw new RuntimeException( "Unable to create server store proxy - " + storeConfig.getCacheIdentifier(), e); } clusteredStore.storeProxy.addInvalidationListener( new ServerStoreProxy.InvalidationListener() { @Override public void onInvalidateHash(long hash) { if (clusteredStore.invalidationValve != null) { try { LOGGER.debug("CLIENT: calling invalidation valve for hash {}", hash); clusteredStore.invalidationValve.invalidateAllWithHash(hash); } catch (StoreAccessException sae) { // TODO: what should be done here? delegate to resilience strategy? LOGGER.error("Error invalidating hash {}", hash, sae); } } } @Override public void onInvalidateAll() { if (clusteredStore.invalidationValve != null) { try { LOGGER.debug("CLIENT: calling invalidation valve for all"); clusteredStore.invalidationValve.invalidateAll(); } catch (StoreAccessException sae) { // TODO: what should be done here? delegate to resilience strategy? LOGGER.error("Error invalidating all", sae); } } } }); }
/** * 新添加一条记录 * * @return 返回记录ID */ public StoreTxLogPosition append(byte[] entry) throws IOException { int length = entry.length; if (length > storeConfig.getMaxxLogEntryLength()) { throw new DBException("Value size can not great than " + storeConfig.getMaxxLogEntryLength()); } // 检查当前文件容量是否足够 if (fileLength + length + ENTRY_HEAD_LENGTH > storeConfig.getTxLogFileSize()) { throw new CapacityNotEnoughException(); } StoreTxLogPosition result = new StoreTxLogPosition(); result.setRecordId(getNextRecordId()); boolean ok = false; try { entryBuffer.clear(); entryBuffer.put(magic); // 1 byte entryBuffer.putInt(length); // 4 byte entryBuffer.put(entry); // entry.length entryBuffer.flip(); fileChannel.position(fileLength); fileChannel.write(entryBuffer); int entryLength = (ENTRY_HEAD_LENGTH + length); fileLength += entryLength; ok = true; } finally { if (ok) { if (syncTimerTask == null || syncTimerTask.isDone()) { syncTimerTask = new FutureTimerTask("ltsdb-dblog-sync-timertask", syncCallable); syncTimer.schedule(syncTimerTask, storeConfig.getDbLogFlushInterval()); } } } return result; }
/**
 * Opens (or creates) a transaction log file segment.
 *
 * <p>For a new file, the header is written and the logical file length starts at
 * the header length. For an existing file, the header is read back and the
 * logical length is taken from the channel size.
 *
 * @param storeConfig   store configuration (entry size limits, flush interval, ...)
 * @param file          the backing log file
 * @param readonly      when {@code false}, a daemon sync timer and flush callback are set up
 * @param isNewFile     when {@code true}, the file must not already exist
 * @param firstRecordId record id stamped into the header of a newly created file
 * @throws IOException if the file already exists for a new segment, or on channel I/O failure
 */
public StoreTxLog(
        StoreConfig storeConfig, File file, boolean readonly, boolean isNewFile, long firstRecordId)
        throws IOException {
    this.storeConfig = storeConfig;
    // Reusable write buffer sized for the max payload plus the 1-byte magic and 4-byte length header.
    this.entryBuffer = ByteBuffer.allocate(storeConfig.getMaxxLogEntryLength() + 1 + 4);
    this.fileHeader = new StoreTxLogFileHeader();
    if (!readonly) {
        // Writable segments get a daemon timer whose callback delegates to checkPoint().
        syncTimer = new Timer("ltsdb-dblog-sync-timer", true);
        syncCallable = new FutureTimerTask.Callable() {
            @Override
            public void call() throws Exception {
                checkPoint();
            }
        };
    }
    if (isNewFile && file.exists()) {
        throw new IOException(file + " exists already");
    } else {
        FileUtils.createFileIfNotExist(file);
    }
    // NOTE(review): the channel is opened "rw" even when readonly == true — confirm this is intended.
    fileChannel = FileUtils.newFileChannel(file, "rw");
    if (isNewFile) {
        // A new file's logical length is just the header length (translated from original Chinese comment).
        fileLength = fileHeader.getLength();
        fileHeader.setFirstRecordId(firstRecordId);
        fileHeader.write(fileChannel);
    } else {
        fileHeader.read(fileChannel);
        fileLength = fileChannel.size();
        lastCheckPointLength = fileLength;
    }
}
/**
 * Smoke test: fetches the "storeConfig" bean from the container and invokes syso() on it.
 *
 * NOTE(review): this test contains no assertions — it only verifies that the bean lookup
 * and the call complete without throwing. Presumably syso() prints the configuration to
 * stdout (the name looks like an Eclipse "syso" template leftover) — confirm against
 * StoreConfig and consider replacing with real assertions on the loaded configuration.
 */
@Test
public void test() {
    StoreConfig bean = super.getBean("storeConfig");
    bean.syso();
}