/**
 * Sets the maximum segment size in bytes, returning the builder for method chaining.
 *
 * <p>The maximum segment size dictates when logs should roll over to new segments. As entries
 * are written to a segment of the log, once the size of the segment surpasses the configured
 * maximum segment size, the log will create a new segment and append new entries to that
 * segment.
 *
 * <p>By default, the maximum segment size is {@code 1024 * 1024 * 32}.
 *
 * @param maxSegmentSize The maximum segment size in bytes. Must be greater than
 *     {@link SegmentDescriptor#BYTES} so a segment can hold its descriptor header plus data.
 * @return The storage builder.
 * @throws IllegalArgumentException If the {@code maxSegmentSize} is not greater than
 *     {@link SegmentDescriptor#BYTES}
 */
public Builder withMaxSegmentSize(int maxSegmentSize) {
  // Note the constraint is stricter than "positive": a segment must at minimum be able to
  // contain its own descriptor of SegmentDescriptor.BYTES bytes.
  Assert.arg(
      maxSegmentSize > SegmentDescriptor.BYTES,
      "maxSegmentSize must be greater than " + SegmentDescriptor.BYTES);
  storage.maxSegmentSize = maxSegmentSize;
  return this;
}
/**
 * Returns a new Atomix replica builder from the given properties.
 *
 * @param properties The properties from which to load the replica builder.
 * @return The replica builder.
 */
public static Builder builder(Properties properties) {
  ReplicaProperties replicaProperties = new ReplicaProperties(properties);
  Collection<Address> replicas = replicaProperties.replicas();
  return builder(replicaProperties.clientAddress(), replicaProperties.serverAddress(), replicas)
      .withTransport(replicaProperties.transport())
      // Storage settings are nested under their own builder and applied as a unit.
      .withStorage(
          Storage.builder()
              .withStorageLevel(replicaProperties.storageLevel())
              .withDirectory(replicaProperties.storageDirectory())
              .withMaxSegmentSize(replicaProperties.maxSegmentSize())
              .withMaxEntriesPerSegment(replicaProperties.maxEntriesPerSegment())
              .withMaxSnapshotSize(replicaProperties.maxSnapshotSize())
              .withRetainStaleSnapshots(replicaProperties.retainStaleSnapshots())
              .withCompactionThreads(replicaProperties.compactionThreads())
              .withMinorCompactionInterval(replicaProperties.minorCompactionInterval())
              .withMajorCompactionInterval(replicaProperties.majorCompactionInterval())
              .withCompactionThreshold(replicaProperties.compactionThreshold())
              .build())
      .withSerializer(replicaProperties.serializer())
      // A quorum hint of -1 is the sentinel for "use the full replica count".
      .withQuorumHint(
          replicaProperties.quorumHint() != -1 ? replicaProperties.quorumHint() : replicas.size())
      .withBackupCount(replicaProperties.backupCount())
      .withElectionTimeout(replicaProperties.electionTimeout())
      .withHeartbeatInterval(replicaProperties.heartbeatInterval())
      .withSessionTimeout(replicaProperties.sessionTimeout());
}
/**
 * Sets the maximum number of allows entries per segment, returning the builder for method
 * chaining.
 *
 * <p>The maximum entry count dictates when logs should roll over to new segments. As entries
 * are written to a segment of the log, if the entry count in that segment meets the configured
 * maximum entry count, the log will create a new segment and append new entries to that
 * segment.
 *
 * <p>By default, the maximum entries per segment is {@code 1024 * 1024}.
 *
 * @param maxEntriesPerSegment The maximum number of entries allowed per segment.
 * @return The storage builder.
 * @throws IllegalArgumentException If the {@code maxEntriesPerSegment} is not positive, or is
 *     greater than the default max entries per segment
 */
public Builder withMaxEntriesPerSegment(int maxEntriesPerSegment) {
  Assert.arg(maxEntriesPerSegment > 0, "max entries per segment must be positive");
  // The default is also the hard upper bound: offsets within a segment are constrained by it.
  Assert.argNot(
      maxEntriesPerSegment > DEFAULT_MAX_ENTRIES_PER_SEGMENT,
      "max entries per segment cannot be greater than " + DEFAULT_MAX_ENTRIES_PER_SEGMENT);
  storage.maxEntriesPerSegment = maxEntriesPerSegment;
  return this;
}
/**
 * Sets up a server state before each test method.
 *
 * <p>Builds an in-memory server fixture: a serializer with service-loader type resolution, a
 * {@link StorageLevel#MEMORY} log, a test state machine, a three-member cluster over a local
 * transport, and the {@link ServerState} under test bound to the first member.
 */
@BeforeMethod
void beforeMethod() throws Throwable {
  serializer = new Serializer();
  serializer.resolve(new ServiceLoaderTypeResolver());
  // MEMORY storage keeps the log off disk so tests leave no artifacts behind.
  storage = new Storage(StorageLevel.MEMORY);
  storage.serializer().resolve(new ServiceLoaderTypeResolver());
  log = storage.open("test");
  stateMachine = new TestStateMachine();
  // Three members — the smallest cluster with a meaningful majority quorum.
  members = createMembers(3);
  transport = new LocalTransport(new LocalServerRegistry());
  serverCtx = new SingleThreadContext("test-server", serializer);
  // The state under test is bound to the first member of the cluster.
  serverState =
      new ServerState(
          members.get(0),
          members,
          log,
          stateMachine,
          new ConnectionManager(transport.client()),
          serverCtx);
}
/**
 * Sets the percentage of entries in the segment that must be cleaned before a segment can be
 * compacted, returning the builder for method chaining.
 *
 * <p>The compaction threshold is used during {@link
 * io.atomix.copycat.server.storage.compaction.Compaction#MINOR minor compaction} to determine
 * the set of segments to compact. By default, the compaction threshold is {@code 0.5}.
 * Increasing the compaction threshold will increase the number of {@link
 * io.atomix.copycat.server.storage.entry.Entry entries} that must be cleaned from the segment
 * before compaction and thus decrease the likelihood that a segment will be compacted.
 * Conversely, decreasing the compaction threshold will increase the frequency of compaction at
 * the cost of unnecessary I/O.
 *
 * @see io.atomix.copycat.server.storage.compaction.MinorCompactionManager
 * @param threshold The segment compact threshold.
 * @return The storage builder.
 * @throws IllegalArgumentException If the {@code threshold} is not positive
 */
public Builder withCompactionThreshold(double threshold) {
  storage.compactionThreshold =
      Assert.argNot(threshold, threshold <= 0, "threshold must be positive");
  return this;
}
/**
 * Sets the major compaction interval, returning the builder for method chaining.
 *
 * <p>This interval controls how frequently the {@link
 * io.atomix.copycat.server.storage.compaction.MajorCompactionManager} evaluates the log's
 * {@link Segment}s for major compaction. Major compaction is expensive, so this interval
 * should typically be at least an order of magnitude longer than the minor compaction
 * interval.
 *
 * @see io.atomix.copycat.server.storage.compaction.MajorCompactionManager
 * @see io.atomix.copycat.server.storage.compaction.MajorCompactionTask
 * @param interval The major compaction interval.
 * @return The storage builder.
 * @throws NullPointerException If the {@code interval} is {@code null}
 */
public Builder withMajorCompactionInterval(Duration interval) {
  // Validate first, then assign; Assert.notNull throws NullPointerException on null.
  Assert.notNull(interval, "interval");
  storage.majorCompactionInterval = interval;
  return this;
}
/**
 * Sets the number of log compaction threads, returning the builder for method chaining.
 *
 * <p>The thread count bounds the parallelism with which the log's {@link
 * io.atomix.copycat.server.storage.compaction.Compactor} may rewrite segments. When
 * unconfigured, the log uses {@code Runtime.getRuntime().availableProcessors() / 2}
 * compaction threads.
 *
 * @param compactionThreads The number of log compaction threads.
 * @return The storage builder.
 * @throws IllegalArgumentException if {@code compactionThreads} is not positive
 */
public Builder withCompactionThreads(int compactionThreads) {
  // Validate before assigning so an invalid value never reaches the storage configuration.
  Assert.arg(compactionThreads, compactionThreads > 0, "compactionThreads must be positive");
  storage.compactionThreads = compactionThreads;
  return this;
}
/**
 * Sets whether to retain stale snapshots on disk, returning the builder for method chaining.
 *
 * <p>As the system state progresses, periodic snapshots of the state machine's state are taken.
 * Once a new snapshot of the state machine is taken, all preceding snapshots no longer
 * contribute to the state of the system and can therefore be removed from disk. By default,
 * snapshots will not be retained once a new snapshot is stored on disk. Enabling snapshot
 * retention will ensure that all snapshots will be saved, e.g. for backup purposes.
 *
 * @param retainStaleSnapshots Whether to retain stale snapshots on disk; {@code false} by
 *     default.
 * @return The storage builder.
 */
public Builder withRetainStaleSnapshots(boolean retainStaleSnapshots) {
  storage.retainStaleSnapshots = retainStaleSnapshots;
  return this;
}
/**
 * Sets the maximum size of snapshot files on disk, returning the builder for method chaining.
 *
 * <p>The maximum snapshot size dictates the size in bytes of a single snapshot file on disk.
 * Reducing the maximum snapshot file size can help ensure that the system is not bogged down
 * with storing and replicating snapshots. By default, snapshots are practically unlimited with
 * a 2GB limit, but in practice they should be fairly small.
 *
 * @param maxSnapshotSize The maximum snapshot size in bytes. Must be at least {@code 64}.
 * @return The storage builder.
 * @throws IllegalArgumentException if the {@code maxSnapshotSize} is less than {@code 64}
 */
public Builder withMaxSnapshotSize(int maxSnapshotSize) {
  // The enforced constraint is >= 64 bytes, not merely positive; the error message and
  // Javadoc now state the actual requirement instead of the misleading "must be positive".
  storage.maxSnapshotSize =
      Assert.arg(
          maxSnapshotSize, maxSnapshotSize >= 64, "max snapshot size must be at least 64 bytes");
  return this;
}
/**
 * Sets the log directory, returning the builder for method chaining.
 *
 * <p>Segment files will be written into the given directory. When multiple {@link Storage}
 * instances run on the same machine, each should be pointed at its own directory.
 *
 * @param directory The log directory.
 * @return The storage builder.
 * @throws NullPointerException If the {@code directory} is {@code null}
 */
public Builder withDirectory(File directory) {
  // Reject null up front; Assert.notNull throws NullPointerException.
  Assert.notNull(directory, "directory");
  storage.directory = directory;
  return this;
}
/**
 * Sets the log storage level, returning the builder for method chaining.
 *
 * <p>The storage level determines how individual {@link
 * io.atomix.copycat.server.storage.entry.Entry entries} are persisted in the log.
 *
 * @param storageLevel The log storage level.
 * @return The storage builder.
 * @throws NullPointerException If the {@code storageLevel} is {@code null}
 */
public Builder withStorageLevel(StorageLevel storageLevel) {
  // Validate before assignment; Assert.notNull throws NullPointerException on null.
  Assert.notNull(storageLevel, "storageLevel");
  storage.storageLevel = storageLevel;
  return this;
}
/**
 * Creates a storage builder rooted at a per-test temporary log directory.
 *
 * @return A storage builder whose directory is {@code target/test-logs/<logId>}.
 */
protected Storage.Builder tempStorageBuilder() {
  // Each test gets its own subdirectory keyed by logId so logs never collide.
  File testLogDirectory = new File("target/test-logs/" + logId);
  return Storage.builder().withDirectory(testLogDirectory);
}