Skip to content

Commit

Permalink
[test] fixing test to account for sharding
Browse files Browse the repository at this point in the history
  • Loading branch information
truthbk committed Oct 29, 2020
1 parent 183d368 commit bcc863f
Show file tree
Hide file tree
Showing 5 changed files with 81 additions and 23 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ public NonBlockingStatsDClient(final String prefix, final int queueSize, String[

// similar settings, but a single worker and non-blocking.
telemetryStatsDProcessor = createProcessor(queueSize, handler, maxPacketSizeBytes,
poolSize, 1, false, 0, aggregationShards);
poolSize, 1, 1, false, 0, aggregationShards);
}

this.telemetry = new Telemetry(telemetrytags, telemetryStatsDProcessor);
Expand Down Expand Up @@ -1001,7 +1001,7 @@ public NonBlockingStatsDClient(final String prefix, final int queueSize, String[

this(prefix, queueSize, constantTags, errorHandler, addressLookup, addressLookup, timeout,
bufferSize, maxPacketSizeBytes, entityID, poolSize, processorWorkers, senderWorkers,
blocking, enableTelemetry, telemetryFlushInterval, 0, 0);
DEFAULT_LOCK_SHARD_GRAIN, blocking, enableTelemetry, telemetryFlushInterval, 0, 0);
}

protected StatsDProcessor createProcessor(final int queueSize, final StatsDClientErrorHandler handler,
Expand Down
13 changes: 12 additions & 1 deletion src/main/java/com/timgroup/statsd/StatsDBlockingProcessor.java
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,12 @@ public void run() {
final int aggregatorFlushInterval, final int aggregatorShards) throws Exception {

super(queueSize, handler, maxPacketSizeBytes, poolSize, workers, lockShardGrain, aggregatorFlushInterval, aggregatorShards);

this.messages = new ArrayBlockingQueue[lockShardGrain];
for (int i = 0 ; i < lockShardGrain ; i++) {
this.messages[i] = new ArrayBlockingQueue<Message>(queueSize);
}

this.processorWorkQueue = new ArrayBlockingQueue[workers];
for (int i = 0 ; i < workers ; i++) {
this.processorWorkQueue[i] = new ArrayBlockingQueue<Integer>(queueSize);
Expand All @@ -126,7 +128,16 @@ protected ProcessingTask createProcessingTask(int id) {
throws Exception {

super(processor);
this.messages = new ArrayBlockingQueue<>(processor.getQcapacity());

this.messages = new ArrayBlockingQueue[lockShardGrain];
for (int i = 0 ; i < lockShardGrain ; i++) {
this.messages[i] = new ArrayBlockingQueue<Message>(getQcapacity());
}

this.processorWorkQueue = new ArrayBlockingQueue[workers];
for (int i = 0 ; i < workers ; i++) {
this.processorWorkQueue[i] = new ArrayBlockingQueue<Integer>(getQcapacity());
}
}

@Override
Expand Down
21 changes: 15 additions & 6 deletions src/main/java/com/timgroup/statsd/StatsDNonBlockingProcessor.java
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,6 @@ public class StatsDNonBlockingProcessor extends StatsDProcessor {

private class ProcessingTask extends StatsDProcessor.ProcessingTask {

private final int processorQueueId;

public ProcessingTask(int id) {
super(id);
}
Expand All @@ -28,6 +26,7 @@ public void run() {
ByteBuffer sendBuffer;
boolean empty = true;
boolean emptyHighPrio = true;
int messageQueueIdx = 0;

try {
sendBuffer = bufferPool.borrow();
Expand Down Expand Up @@ -57,7 +56,7 @@ public void run() {
message = highPrioMessages.poll();
} else {

final int messageQueueIdx = processorWorkQueue[this.processorQueueId].poll();
messageQueueIdx = processorWorkQueue[this.processorQueueId].poll();
message = messages[messageQueueIdx].poll();
}

Expand Down Expand Up @@ -126,7 +125,6 @@ public void run() {
final int aggregatorShards) throws Exception {

super(queueSize, handler, maxPacketSizeBytes, poolSize, workers, lockShardGrain, aggregatorFlushInterval, aggregatorShards);
this.qsize = new AtomicInteger(0);

this.qsize = new AtomicInteger[lockShardGrain];
this.messages = new ConcurrentLinkedQueue[lockShardGrain];
Expand All @@ -149,8 +147,19 @@ protected ProcessingTask createProcessingTask(int id) {

/**
 * Copy constructor: clones the sharded queue layout of another processor.
 * lockShardGrain and workers are inherited via super(processor).
 */
StatsDNonBlockingProcessor(final StatsDNonBlockingProcessor processor) throws Exception {
    super(processor);

    // One unbounded message queue per lock shard. qsize[i] is a companion
    // counter giving an approximate element count for shard i (kept because
    // ConcurrentLinkedQueue.size() is O(n) and not a constant-time snapshot).
    this.qsize = new AtomicInteger[lockShardGrain];
    this.messages = new ConcurrentLinkedQueue[lockShardGrain];
    for (int i = 0; i < lockShardGrain; i++) {
        this.qsize[i] = new AtomicInteger(0);
        this.messages[i] = new ConcurrentLinkedQueue<Message>();
    }

    // One work queue per worker thread; entries are shard indices with
    // pending messages to drain.
    this.processorWorkQueue = new ConcurrentLinkedQueue[workers];
    for (int i = 0; i < workers; i++) {
        this.processorWorkQueue[i] = new ConcurrentLinkedQueue<Integer>();
    }
}

@Override
Expand Down
1 change: 1 addition & 0 deletions src/main/java/com/timgroup/statsd/StatsDProcessor.java
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ public Thread newThread(final Runnable runnable) {

this.handler = processor.handler;
this.workers = processor.workers;
this.lockShardGrain = processor.lockShardGrain;
this.qcapacity = processor.getQcapacity();

this.executor = Executors.newFixedThreadPool(workers, new ThreadFactory() {
Expand Down
65 changes: 51 additions & 14 deletions src/test/java/com/timgroup/statsd/StatsDAggregatorTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ConcurrentLinkedQueue;
Expand Down Expand Up @@ -81,22 +82,42 @@ protected void writeTo(StringBuilder builder){}
// fakeProcessor store messages from the telemetry only
public static class FakeProcessor extends StatsDProcessor {

private final Queue<Message> messages;
private final Queue<Message>[] messages;
private final Queue<Integer>[] processorWorkQueue;
private final AtomicInteger[] qsize; // qSize will not reflect actual size, but a close estimate.
private final AtomicInteger messageSent = new AtomicInteger(0);
private final AtomicInteger messageAggregated = new AtomicInteger(0);

/**
 * Builds a minimal single-shard, single-worker processor for tests.
 * Super args (see StatsDProcessor ctor): queueSize=50, handler,
 * maxPacketSizeBytes=0, poolSize=1, workers=1, lockShardGrain=1,
 * aggregatorFlushInterval=0, aggregatorShards=DEFAULT_SHARDS.
 */
FakeProcessor(final StatsDClientErrorHandler handler) throws Exception {
    super(50, handler, 0, 1, 1, 1, 0, StatsDAggregator.DEFAULT_SHARDS);

    // Single shard (lockShardGrain == 1): one bounded message queue plus an
    // approximate size counter per shard.
    this.qsize = new AtomicInteger[lockShardGrain];
    this.messages = new ArrayBlockingQueue[lockShardGrain];
    for (int i = 0; i < lockShardGrain; i++) {
        this.qsize[i] = new AtomicInteger(0);
        this.messages[i] = new ArrayBlockingQueue<Message>(getQcapacity());
    }

    // Single worker: one queue of shard indices awaiting processing.
    this.processorWorkQueue = new ArrayBlockingQueue[workers];
    for (int i = 0; i < workers; i++) {
        this.processorWorkQueue[i] = new ArrayBlockingQueue<Integer>(getQcapacity());
    }
}

private class FakeProcessingTask extends StatsDProcessor.ProcessingTask {

// Forwards the worker id to the base ProcessingTask; all queue wiring is
// owned by the enclosing FakeProcessor.
public FakeProcessingTask(int id) {
super(id);
}

@Override
public void run() {

while (!shutdown) {
final Message message = messages.poll();
final Message message = messages[0].poll();
if (message == null) {

try{
Expand All @@ -106,6 +127,7 @@ public void run() {
continue;
}

qsize[0].decrementAndGet();
if (aggregator.aggregateMessage(message)) {
messageAggregated.incrementAndGet();
continue;
Expand All @@ -118,23 +140,38 @@ public void run() {
}

@Override
protected StatsDProcessor.ProcessingTask createProcessingTask() {
return new FakeProcessingTask();
protected StatsDProcessor.ProcessingTask createProcessingTask(int id) {
return new FakeProcessingTask(id);
}

@Override
// Test-fake send(): mirrors the production sharded routing so aggregation
// tests exercise the same enqueue path. Returns true when the message was
// queued, false when shutting down or the shard queue is at capacity.
public boolean send(final Message msg) {
    if (!shutdown) {
        // Route by calling thread, matching the real processor's scheme:
        // thread id modulo shard grain / worker count.
        int threadId = getThreadId();
        int shard = threadId % lockShardGrain;
        int processQueue = threadId % workers;

        // qsize is an estimate (read before offer, incremented after), so the
        // capacity bound is approximate rather than strict under concurrency.
        if (qsize[shard].get() < qcapacity) {
            messages[shard].offer(msg);
            qsize[shard].incrementAndGet();
            // Record which shard has a pending message so a worker drains it.
            processorWorkQueue[processQueue].offer(shard);
            return true;
        }
    }

    return false;
}

public Queue<Message> getMessages() {
return messages;
public Queue<Message> getMessages(int id) {
return messages[id];
}

// Best-effort reset of all queues between test runs; failures are
// deliberately ignored so cleanup can never fail a test.
public void clear() {
    try {
        for (int i = 0; i < lockShardGrain; i++) {
            messages[i].clear();
        }
        highPrioMessages.clear();
    } catch (Exception ignored) {
        // intentionally swallowed: clear() is best-effort test cleanup
    }
}
Expand Down Expand Up @@ -181,7 +218,7 @@ public void aggregate_messages() throws Exception {
fakeProcessor.send(new FakeAlphaMessage("some.set", Message.Type.SET, "value"));
}

waitForQueueSize(fakeProcessor.messages, 0);
waitForQueueSize(fakeProcessor.messages[0], 0);

// 10 gauges, 10 counts, 10 sets
assertEquals(30, fakeProcessor.messageAggregated.get());
Expand All @@ -206,7 +243,7 @@ public void aggregation_sharding() throws Exception {
fakeProcessor.send(gauge);
}

waitForQueueSize(fakeProcessor.messages, 0);
waitForQueueSize(fakeProcessor.messages[0], 0);

for (int i=0 ; i<StatsDAggregator.DEFAULT_SHARDS ; i++) {
Map<Message, Message> map = fakeProcessor.aggregator.aggregateMetrics.get(i);
Expand Down

0 comments on commit bcc863f

Please sign in to comment.