Mirror of https://github.com/vacp2p/linea-besu.git, synced 2026-01-08 23:17:54 -05:00
Snapsync persist state (#4381)
This PR avoids restarting the download of the world state from scratch when Besu is restarted.

Signed-off-by: Karim TAAM <karim.t2am@gmail.com>
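At a high level, the change makes the pending world-state download work recoverable across restarts: rather than keeping the task backlog only in flat files that do not survive a restart (see the deleted FlatFileTaskCollection further down), the queue contents can be written to ordinary key-value storage and re-enqueued on the next start. The sketch below is an illustration of that idea only, not the PR's actual wiring: the PendingTaskPersistence helper and its key scheme are invented here, and it assumes Besu's KeyValueStorage/KeyValueStorageTransaction plugin API together with the stream() and asList() methods introduced in this diff.

import java.util.List;
import java.util.function.Function;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.services.tasks.InMemoryTaskQueue;

// Hypothetical helper: save the still-pending tasks before shutdown and re-enqueue
// them on the next start, instead of downloading the world state from scratch again.
final class PendingTaskPersistence<T> {
  private final KeyValueStorage storage;          // any persistent KeyValueStorage
  private final Function<T, Bytes> serializer;    // task -> bytes
  private final Function<Bytes, T> deserializer;  // bytes -> task

  PendingTaskPersistence(
      final KeyValueStorage storage,
      final Function<T, Bytes> serializer,
      final Function<Bytes, T> deserializer) {
    this.storage = storage;
    this.serializer = serializer;
    this.deserializer = deserializer;
  }

  void save(final InMemoryTaskQueue<T> queue) {
    final List<T> pending = queue.asList(); // snapshot accessor added in this PR
    final KeyValueStorageTransaction tx = storage.startTransaction();
    for (int i = 0; i < pending.size(); i++) {
      tx.put(
          Bytes.ofUnsignedInt(i).toArrayUnsafe(),
          serializer.apply(pending.get(i)).toArrayUnsafe());
    }
    tx.commit();
  }

  void restore(final InMemoryTaskQueue<T> queue) {
    // stream() (added in this PR) walks every persisted key/value pair; a real
    // implementation would use a dedicated segment rather than a whole store.
    storage.stream().forEach(pair -> queue.add(deserializer.apply(Bytes.wrap(pair.getValue()))));
  }
}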
@@ -33,6 +33,7 @@ import java.util.function.Predicate;
 import java.util.stream.Stream;
 
 import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.tuweni.bytes.Bytes;
 
 public class InMemoryKeyValueStorage implements KeyValueStorage {
@@ -77,7 +78,30 @@ public class InMemoryKeyValueStorage implements KeyValueStorage {
 
   @Override
   public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {
-    return streamKeys().filter(returnCondition).collect(toUnmodifiableSet());
+    return stream()
+        .filter(pair -> returnCondition.test(pair.getKey()))
+        .map(Pair::getKey)
+        .collect(toUnmodifiableSet());
   }
 
+  @Override
+  public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
+    return stream()
+        .filter(pair -> returnCondition.test(pair.getKey()))
+        .map(Pair::getValue)
+        .collect(toUnmodifiableSet());
+  }
+
+  @Override
+  public Stream<Pair<byte[], byte[]>> stream() {
+    final Lock lock = rwLock.readLock();
+    lock.lock();
+    try {
+      return ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
+          .map(bytesEntry -> Pair.of(bytesEntry.getKey().toArrayUnsafe(), bytesEntry.getValue()));
+    } finally {
+      lock.unlock();
+    }
+  }
+
   @Override
@@ -85,7 +109,8 @@ public class InMemoryKeyValueStorage {
     final Lock lock = rwLock.readLock();
     lock.lock();
     try {
-      return ImmutableSet.copyOf(hashValueStore.keySet()).stream().map(Bytes::toArrayUnsafe);
+      return ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
+          .map(bytesEntry -> bytesEntry.getKey().toArrayUnsafe());
     } finally {
       lock.unlock();
     }
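For orientation, a small usage sketch of the two methods added above. It is illustrative only: ExampleStreamUsage is a hypothetical class, and it assumes the no-arg InMemoryKeyValueStorage constructor and Besu's KeyValueStorageTransaction API (startTransaction/put/commit), neither of which is part of this diff.

import java.util.Set;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;

public class ExampleStreamUsage {
  public static void main(final String[] args) {
    final InMemoryKeyValueStorage storage = new InMemoryKeyValueStorage();

    final KeyValueStorageTransaction tx = storage.startTransaction();
    tx.put(new byte[] {0x01}, new byte[] {0x0a});
    tx.put(new byte[] {0x02}, new byte[] {0x0b});
    tx.commit();

    // stream() now yields key/value pairs instead of keys only.
    storage.stream()
        .forEach(
            pair ->
                System.out.println(
                    Bytes.wrap(pair.getKey()) + " -> " + Bytes.wrap(pair.getValue())));

    // getAllValuesFromKeysThat() returns the values whose keys match a predicate.
    final Set<byte[]> values = storage.getAllValuesFromKeysThat(key -> key[0] == 0x01);
    System.out.println(values.size()); // 1
  }
}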
@@ -34,6 +34,7 @@ import java.util.stream.Stream;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.tuweni.bytes.Bytes;
 
 /**
@@ -81,7 +82,30 @@ public class LimitedInMemoryKeyValueStorage implements KeyValueStorage {
 
   @Override
   public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {
-    return streamKeys().filter(returnCondition).collect(toUnmodifiableSet());
+    return stream()
+        .filter(pair -> returnCondition.test(pair.getKey()))
+        .map(Pair::getKey)
+        .collect(toUnmodifiableSet());
   }
 
+  @Override
+  public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
+    return stream()
+        .filter(pair -> returnCondition.test(pair.getKey()))
+        .map(Pair::getValue)
+        .collect(toUnmodifiableSet());
+  }
+
+  @Override
+  public Stream<Pair<byte[], byte[]>> stream() {
+    final Lock lock = rwLock.readLock();
+    lock.lock();
+    try {
+      return ImmutableSet.copyOf(storage.asMap().entrySet()).stream()
+          .map(bytesEntry -> Pair.of(bytesEntry.getKey().toArrayUnsafe(), bytesEntry.getValue()));
+    } finally {
+      lock.unlock();
+    }
+  }
+
   @Override
@@ -89,7 +113,8 @@ public class LimitedInMemoryKeyValueStorage {
     final Lock lock = rwLock.readLock();
     lock.lock();
    try {
-      return ImmutableSet.copyOf(storage.asMap().keySet()).stream().map(Bytes::toArrayUnsafe);
+      return ImmutableSet.copyOf(storage.asMap().entrySet()).stream()
+          .map(bytesEntry -> bytesEntry.getKey().toArrayUnsafe());
     } finally {
       lock.unlock();
     }
@@ -23,6 +23,8 @@ import java.util.Set;
 import java.util.function.Predicate;
 import java.util.stream.Stream;
 
+import org.apache.commons.lang3.tuple.Pair;
+
 /**
  * Service provided by Besu to facilitate persistent data storage.
  *
@@ -58,8 +60,9 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
    * @param segmentHandle The segment handle whose keys we want to stream.
    * @return A stream of all keys in the specified segment.
    */
-  Stream<byte[]> streamKeys(final S segmentHandle);
+  Stream<Pair<byte[], byte[]>> stream(final S segmentHandle);
 
+  Stream<byte[]> streamKeys(final S segmentHandle);
   /**
    * Delete the value corresponding to the given key in the given segment if a write lock can be
    * instantly acquired on the underlying storage. Do nothing otherwise.
@@ -74,6 +77,8 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
 
   Set<byte[]> getAllKeysThat(S segmentHandle, Predicate<byte[]> returnCondition);
 
+  Set<byte[]> getAllValuesFromKeysThat(final S segmentHandle, Predicate<byte[]> returnCondition);
+
   void clear(S segmentHandle);
 
   /**
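The two additions above give the segmented interface the same pair-streaming shape as the flat KeyValueStorage. As a sketch of the intended contract only (a hypothetical class, not a hunk from this PR), getAllValuesFromKeysThat can be derived from the new stream(segmentHandle), mirroring what the in-memory implementations earlier in this diff do for the unsegmented case:

import static java.util.stream.Collectors.toUnmodifiableSet;

import java.util.Set;
import java.util.function.Predicate;

import org.apache.commons.lang3.tuple.Pair;

abstract class ExampleSegmentedStorage<S> implements SegmentedKeyValueStorage<S> {

  @Override
  public Set<byte[]> getAllValuesFromKeysThat(
      final S segmentHandle, final Predicate<byte[]> returnCondition) {
    // Filter the segment's key/value pairs by key, keep only the values.
    return stream(segmentHandle)
        .filter(pair -> returnCondition.test(pair.getKey()))
        .map(Pair::getValue)
        .collect(toUnmodifiableSet());
  }
}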
@@ -27,7 +27,10 @@ import java.util.function.Predicate;
 import java.util.function.Supplier;
 import java.util.stream.Stream;
 
+import org.apache.commons.lang3.tuple.Pair;
+
 public class SegmentedKeyValueStorageAdapter<S> implements SnappableKeyValueStorage {
+
   private final S segmentHandle;
   private final SegmentedKeyValueStorage<S> storage;
   private final Supplier<SnappedKeyValueStorage> snapshotSupplier;
@@ -71,6 +74,16 @@ public class SegmentedKeyValueStorageAdapter<S> implements SnappableKeyValueStor
     return storage.getAllKeysThat(segmentHandle, returnCondition);
   }
 
   @Override
+  public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
+    return storage.getAllValuesFromKeysThat(segmentHandle, returnCondition);
+  }
+
+  @Override
+  public Stream<Pair<byte[], byte[]>> stream() {
+    return storage.stream(segmentHandle);
+  }
+
+  @Override
   public Stream<byte[]> streamKeys() {
     return storage.streamKeys(segmentHandle);
@@ -1,290 +0,0 @@
-/*
- * Copyright ConsenSys AG.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-package org.hyperledger.besu.services.tasks;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Function;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.tuweni.bytes.Bytes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class FlatFileTaskCollection<T> implements TaskCollection<T> {
-  private static final Logger LOG = LoggerFactory.getLogger(FlatFileTaskCollection.class);
-  private static final long DEFAULT_FILE_ROLL_SIZE_BYTES = 1024 * 1024 * 10; // 10Mb
-  static final String FILENAME_PREFIX = "tasks";
-  private final Set<FlatFileTask<T>> outstandingTasks = new HashSet<>();
-
-  private final Path storageDirectory;
-  private final Function<T, Bytes> serializer;
-  private final Function<Bytes, T> deserializer;
-  private final long rollWhenFileSizeExceedsBytes;
-
-  private final ByteBuffer lengthBuffer = ByteBuffer.allocate(Integer.BYTES);
-
-  private FileChannel readFileChannel;
-  private FileChannel writeFileChannel;
-
-  private long size = 0;
-  private int readFileNumber = 0;
-  private int writeFileNumber = 0;
-
-  public FlatFileTaskCollection(
-      final Path storageDirectory,
-      final Function<T, Bytes> serializer,
-      final Function<Bytes, T> deserializer) {
-    this(storageDirectory, serializer, deserializer, DEFAULT_FILE_ROLL_SIZE_BYTES);
-  }
-
-  FlatFileTaskCollection(
-      final Path storageDirectory,
-      final Function<T, Bytes> serializer,
-      final Function<Bytes, T> deserializer,
-      final long rollWhenFileSizeExceedsBytes) {
-    this.storageDirectory = storageDirectory;
-    this.serializer = serializer;
-    this.deserializer = deserializer;
-    this.rollWhenFileSizeExceedsBytes = rollWhenFileSizeExceedsBytes;
-    writeFileChannel = openWriteFileChannel(writeFileNumber);
-    readFileChannel = openReadFileChannel(readFileNumber);
-  }
-
-  private FileChannel openReadFileChannel(final int fileNumber) {
-    try {
-      return FileChannel.open(
-          pathForFileNumber(fileNumber),
-          StandardOpenOption.DELETE_ON_CLOSE,
-          StandardOpenOption.READ);
-    } catch (final IOException e) {
-      throw new StorageException(e);
-    }
-  }
-
-  private FileChannel openWriteFileChannel(final int fileNumber) {
-    try {
-      return FileChannel.open(
-          pathForFileNumber(fileNumber),
-          StandardOpenOption.TRUNCATE_EXISTING,
-          StandardOpenOption.WRITE,
-          StandardOpenOption.CREATE);
-    } catch (final IOException e) {
-      throw new StorageException(
-          "There was a problem opening FileChannel " + pathForFileNumber(fileNumber), e);
-    }
-  }
-
-  @Override
-  public synchronized void add(final T taskData) {
-    final Bytes data = serializer.apply(taskData);
-    try {
-      writeTaskData(data);
-      size++;
-      if (writeFileChannel.size() > rollWhenFileSizeExceedsBytes) {
-        LOG.debug("Writing reached end of file {}", writeFileNumber);
-        writeFileChannel.close();
-        writeFileNumber++;
-        writeFileChannel = openWriteFileChannel(writeFileNumber);
-      }
-    } catch (final IOException e) {
-      throw new StorageException(
-          "There was a problem adding to FileChannel " + pathForFileNumber(writeFileNumber), e);
-    }
-  }
-
-  @Override
-  public synchronized Task<T> remove() {
-    if (isEmpty()) {
-      return null;
-    }
-    try {
-      final ByteBuffer dataBuffer = readNextTaskData();
-      final T data = deserializer.apply(Bytes.wrapByteBuffer(dataBuffer));
-      final FlatFileTask<T> task = new FlatFileTask<>(this, data);
-      outstandingTasks.add(task);
-      size--;
-      return task;
-    } catch (final IOException e) {
-      throw new StorageException(
-          "There was a problem removing from FileChannel " + pathForFileNumber(readFileNumber), e);
-    }
-  }
-
-  private ByteBuffer readNextTaskData() throws IOException {
-    final int dataLength = readDataLength();
-    final ByteBuffer dataBuffer = ByteBuffer.allocate(dataLength);
-    readBytes(dataBuffer, dataLength);
-    return dataBuffer;
-  }
-
-  private void writeTaskData(final Bytes data) throws IOException {
-    final long offset = writeFileChannel.size();
-    writeDataLength(data.size(), offset);
-    writeFileChannel.write(ByteBuffer.wrap(data.toArrayUnsafe()), offset + Integer.BYTES);
-  }
-
-  private int readDataLength() throws IOException {
-    lengthBuffer.position(0);
-    lengthBuffer.limit(Integer.BYTES);
-    readBytes(lengthBuffer, Integer.BYTES);
-    return lengthBuffer.getInt(0);
-  }
-
-  private void writeDataLength(final int size, final long offset) throws IOException {
-    lengthBuffer.position(0);
-    lengthBuffer.putInt(size);
-    lengthBuffer.flip();
-    writeFileChannel.write(lengthBuffer, offset);
-  }
-
-  private void readBytes(final ByteBuffer buffer, final int expectedLength) throws IOException {
-    int readBytes = readFileChannel.read(buffer);
-
-    if (readBytes == -1 && writeFileNumber > readFileNumber) {
-      LOG.debug("Reading reached end of file {}", readFileNumber);
-      readFileChannel.close();
-      readFileNumber++;
-      readFileChannel = openReadFileChannel(readFileNumber);
-
-      readBytes = readFileChannel.read(buffer);
-    }
-    if (readBytes != expectedLength) {
-      throw new IllegalStateException(
-          "Task queue corrupted. Expected to read "
-              + expectedLength
-              + " bytes but only got "
-              + readBytes);
-    }
-  }
-
-  @Override
-  public synchronized long size() {
-    return size;
-  }
-
-  @Override
-  public synchronized boolean isEmpty() {
-    return size() == 0;
-  }
-
-  @VisibleForTesting
-  int getReadFileNumber() {
-    return readFileNumber;
-  }
-
-  @VisibleForTesting
-  int getWriteFileNumber() {
-    return writeFileNumber;
-  }
-
-  @Override
-  public synchronized void clear() {
-    outstandingTasks.clear();
-    try {
-      readFileChannel.close();
-      writeFileChannel.close();
-      for (int i = readFileNumber; i <= writeFileNumber; i++) {
-        final File file = pathForFileNumber(i).toFile();
-        if (!file.delete() && file.exists()) {
-          LOG.error("Failed to delete tasks file {}", file.getAbsolutePath());
-        }
-      }
-      readFileNumber = 0;
-      writeFileNumber = 0;
-      writeFileChannel = openWriteFileChannel(writeFileNumber);
-      readFileChannel = openReadFileChannel(readFileNumber);
-      size = 0;
-    } catch (final IOException e) {
-      throw new StorageException(e);
-    }
-  }
-
-  @Override
-  public synchronized boolean allTasksCompleted() {
-    return isEmpty() && outstandingTasks.isEmpty();
-  }
-
-  @Override
-  public synchronized void close() {
-    try {
-      readFileChannel.close();
-      writeFileChannel.close();
-    } catch (final IOException e) {
-      throw new StorageException(e);
-    }
-  }
-
-  private Path pathForFileNumber(final int fileNumber) {
-    return storageDirectory.resolve(FILENAME_PREFIX + fileNumber);
-  }
-
-  private synchronized boolean markTaskCompleted(final FlatFileTask<T> task) {
-    return outstandingTasks.remove(task);
-  }
-
-  private synchronized void handleFailedTask(final FlatFileTask<T> task) {
-    if (markTaskCompleted(task)) {
-      add(task.getData());
-    }
-  }
-
-  public static class StorageException extends RuntimeException {
-    StorageException(final Throwable t) {
-      super(t);
-    }
-
-    StorageException(final String m, final Throwable t) {
-      super(m, t);
-    }
-  }
-
-  private static class FlatFileTask<T> implements Task<T> {
-    private final AtomicBoolean completed = new AtomicBoolean(false);
-    private final FlatFileTaskCollection<T> parentQueue;
-    private final T data;
-
-    private FlatFileTask(final FlatFileTaskCollection<T> parentQueue, final T data) {
-      this.parentQueue = parentQueue;
-      this.data = data;
-    }
-
-    @Override
-    public T getData() {
-      return data;
-    }
-
-    @Override
-    public void markCompleted() {
-      if (completed.compareAndSet(false, true)) {
-        parentQueue.markTaskCompleted(this);
-      }
-    }
-
-    @Override
-    public void markFailed() {
-      if (completed.compareAndSet(false, true)) {
-        parentQueue.handleFailedTask(this);
-      }
-    }
-  }
-}
@@ -15,7 +15,9 @@
 package org.hyperledger.besu.services.tasks;
 
 import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -85,6 +87,10 @@ public class InMemoryTaskQueue<T> implements TaskCollection<T> {
     }
   }
 
+  public synchronized List<T> asList() {
+    return new ArrayList<>(internalQueue);
+  }
+
   private synchronized void handleFailedTask(final InMemoryTask<T> task) {
     if (markTaskCompleted(task)) {
       add(task.getData());
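The new asList() accessor returns a point-in-time copy of everything still enqueued (tasks already handed out via remove() but not yet completed are tracked separately and are not included), which is what makes snapshotting a queue for persistence possible. A minimal illustration with a hypothetical class:

import java.util.List;

import org.hyperledger.besu.services.tasks.InMemoryTaskQueue;

public class QueueSnapshotExample {
  public static void main(final String[] args) {
    final InMemoryTaskQueue<String> queue = new InMemoryTaskQueue<>();
    queue.add("account-range-task");
    queue.add("storage-range-task");

    // Copy of the still-pending tasks, e.g. to persist before shutdown.
    final List<String> pending = queue.asList();
    pending.forEach(System.out::println);
  }
}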
@@ -1,90 +0,0 @@
-/*
- * Copyright ConsenSys AG.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-package org.hyperledger.besu.services.tasks;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.function.Function;
-
-import org.apache.tuweni.bytes.Bytes;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-public class FlatFileTaskCollectionTest
-    extends AbstractTaskQueueTest<FlatFileTaskCollection<Bytes>> {
-
-  private static final int ROLL_SIZE = 10;
-  @Rule public final TemporaryFolder folder = new TemporaryFolder();
-
-  @Override
-  protected FlatFileTaskCollection<Bytes> createQueue() throws IOException {
-    final Path dataDir = folder.newFolder().toPath();
-    return createQueue(dataDir);
-  }
-
-  private FlatFileTaskCollection<Bytes> createQueue(final Path dataDir) {
-    return new FlatFileTaskCollection<>(
-        dataDir, Function.identity(), Function.identity(), ROLL_SIZE);
-  }
-
-  @Test
-  public void shouldRollFilesWhenSizeExceeded() throws Exception {
-    final Path dataDir = folder.newFolder().toPath();
-    try (final FlatFileTaskCollection<Bytes> queue = createQueue(dataDir)) {
-      final List<Bytes> tasks = new ArrayList<>();
-
-      addItem(queue, tasks, 0);
-      assertThat(queue.getWriteFileNumber()).isEqualTo(0);
-      int tasksInFirstFile = 1;
-      while (queue.getWriteFileNumber() == 0) {
-        addItem(queue, tasks, tasksInFirstFile);
-        tasksInFirstFile++;
-      }
-
-      assertThat(queue.getWriteFileNumber()).isGreaterThan(0);
-      assertThat(queue.getReadFileNumber()).isEqualTo(0);
-
-      // Add extra items to be sure we have at least one in a later file
-      addItem(queue, tasks, 123);
-      addItem(queue, tasks, 124);
-
-      final List<Bytes> removedTasks = new ArrayList<>();
-      // Read through all the items in the first file.
-      for (int i = 0; i < tasksInFirstFile; i++) {
-        removedTasks.add(queue.remove().getData());
-      }
-
-      // read one more to make sure we are reading from the next file
-      removedTasks.add(queue.remove().getData());
-      assertThat(queue.getReadFileNumber()).isEqualTo(1);
-
-      // Check that all tasks were read correctly.
-      removedTasks.add(queue.remove().getData());
-      assertThat(queue.isEmpty()).isTrue();
-      assertThat(removedTasks).isEqualTo(tasks);
-    }
-  }
-
-  private void addItem(
-      final FlatFileTaskCollection<Bytes> queue, final List<Bytes> tasks, final int value) {
-    tasks.add(Bytes.of(value));
-    queue.add(Bytes.of(value));
-  }
-}