Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,8 @@ public interface Filter extends Predicate<CloudEvent> {
/** Returns a filter that accepts every event unconditionally. */
static Filter noop() {
    return cloudEvent -> true;
}

/**
 * Releases any resources held by this filter (e.g. background optimizer
 * threads). The default implementation is a no-op; implementations that own
 * resources should override it.
 */
default void close() {}
}
Original file line number Diff line number Diff line change
Expand Up @@ -503,6 +503,8 @@ private void recordReceived(final ConsumerRecordContext recordContext) {
public Future<Void> close() {
this.closed.set(true);

this.filter.close();

Metrics.searchEgressMeters(
meterRegistry, consumerVerticleContext.getEgress().getReference())
.forEach(meterRegistry::remove);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,28 +18,53 @@
import dev.knative.eventing.kafka.broker.dispatcher.Filter;
import io.cloudevents.CloudEvent;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AllFilter implements Filter {

private final List<Filter> filters;
private final List<FilterCounter> filters;
private static final Logger logger = LoggerFactory.getLogger(AllFilter.class);

private final ArrayBlockingQueue<Integer> indexSwapQueue;

private final FilterListOptimizer filterListOptimizer;

private final ReadWriteLock readWriteLock;

public AllFilter(List<Filter> filters) {
this.filters = filters;
this.filters = filters.stream().map(FilterCounter::new).collect(Collectors.toList());
this.indexSwapQueue = new ArrayBlockingQueue<>(1);
this.readWriteLock = new ReentrantReadWriteLock();
this.filterListOptimizer =
new FilterListOptimizer(this.readWriteLock, this.indexSwapQueue, this.filters, logger);
this.filterListOptimizer.start();
}

@Override
public boolean test(CloudEvent cloudEvent) {
logger.debug("Testing event against ALL filters. Event {}", cloudEvent);
for (Filter filter : filters) {
this.readWriteLock.readLock().lock();
for (int i = 0; i < this.filters.size(); i++) {
Filter filter = this.filters.get(i).getFilter();
if (!filter.test(cloudEvent)) {
this.indexSwapQueue.offer(i);
logger.debug("Test failed. Filter {} Event {}", filter, cloudEvent);
this.readWriteLock.readLock().unlock();
return false;
}
}
logger.debug("Test ALL filters succeeded. Event {}", cloudEvent);
this.readWriteLock.readLock().unlock();
return true;
}

@Override
public void close() {
this.filterListOptimizer.interrupt();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,30 +18,54 @@
import dev.knative.eventing.kafka.broker.dispatcher.Filter;
import io.cloudevents.CloudEvent;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AnyFilter implements Filter {

private static final Logger logger = LoggerFactory.getLogger(AnyFilter.class);

private final List<Filter> filters;
private final List<FilterCounter> filters;

private final ArrayBlockingQueue<Integer> indexSwapQueue;

private final FilterListOptimizer filterListOptimizer;

private final ReadWriteLock readWriteLock;
Copy link
Member

@pierDipi pierDipi Oct 11, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Generally, Vertx doesn't like locks and blocking operations, I'm pretty sure with this implementation we will block the event loop and that causes basically to block any event delivery for a particular trigger


public AnyFilter(List<Filter> filters) {
this.filters = filters;
this.filters = filters.stream().map(FilterCounter::new).collect(Collectors.toList());
this.indexSwapQueue = new ArrayBlockingQueue<>(1);
this.readWriteLock = new ReentrantReadWriteLock();
this.filterListOptimizer =
new FilterListOptimizer(this.readWriteLock, this.indexSwapQueue, this.filters, logger);
this.filterListOptimizer.start();
}

@Override
public boolean test(CloudEvent cloudEvent) {
logger.debug("Testing event against ANY filter. Event {}", cloudEvent);

for (Filter filter : filters) {
this.readWriteLock.readLock().lock();
for (int i = 0; i < this.filters.size(); i++) {
Filter filter = this.filters.get(i).getFilter();
if (filter.test(cloudEvent)) {
this.indexSwapQueue.offer(i);
logger.debug("Test succeeded. Filter {} Event {}", filter, cloudEvent);
this.readWriteLock.readLock().unlock();
return true;
}
}
logger.debug("Test failed. All filters failed. Event {}", cloudEvent);
this.readWriteLock.readLock().unlock();
return false;
}

@Override
public void close() {
this.filterListOptimizer.interrupt();
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
/*
* Copyright © 2018 Knative Authors ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.knative.eventing.kafka.broker.dispatcher.impl.filter.subscriptionsapi;

import dev.knative.eventing.kafka.broker.dispatcher.Filter;

/**
 * Pairs a {@link Filter} with a counter tracking how often it decided an
 * event's outcome. The counter is read and written only by the single
 * {@link FilterListOptimizer} thread.
 */
public class FilterCounter {

  private final Filter filter;

  // Number of times this filter short-circuited an evaluation.
  private int count = 0;

  public FilterCounter(Filter filter) {
    this.filter = filter;
  }

  public Filter getFilter() {
    return this.filter;
  }

  public int getCount() {
    return this.count;
  }

  /** Bumps the counter and returns its value from BEFORE the bump (post-increment semantics). */
  public int incrementCount() {
    final int previous = this.count;
    this.count = previous + 1;
    return previous;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
/*
* Copyright © 2018 Knative Authors ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.knative.eventing.kafka.broker.dispatcher.impl.filter.subscriptionsapi;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.locks.ReadWriteLock;
import org.slf4j.Logger;

public class FilterListOptimizer extends Thread {
Copy link
Member

@pierDipi pierDipi Oct 11, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A thread is way bigger than a go routing and during our scalability tests we reached the maximum thread count (which can't be easily increased in many platforms), so having more threads is a bit problematic until we have loom threads I don't really recommend increasing thread count (1 per consumer group is a lot)

Copy link
Member

@pierDipi pierDipi Oct 11, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe what we might gain from runtime optimizations we will lose in higher memory usage and risk of blocking the Vertx event loop

private final ReadWriteLock readWriteLock;

private final ArrayBlockingQueue<Integer> indexSwapQueue;

private final List<FilterCounter> filters;

private final Logger logger;

public FilterListOptimizer(
ReadWriteLock readWriteLock,
ArrayBlockingQueue<Integer> indexSwapQueue,
List<FilterCounter> filters,
Logger logger) {
this.filters = filters;
this.indexSwapQueue = indexSwapQueue;
this.readWriteLock = readWriteLock;
this.logger = logger;
}

@Override
public void run() {
while (true) {
if (Thread.interrupted()) {
return;
}
try {
this.readWriteLock.readLock().lock();
final int swapIndex =
this.indexSwapQueue.take(); // this is the only line that throws InterruptedException
if (swapIndex != 0
&& this.filters.get(swapIndex).incrementCount()
> 2 * this.filters.get(swapIndex - 1).getCount()) {
new Thread(() -> {
this.readWriteLock.writeLock().lock();
Collections.swap(this.filters, swapIndex - 1, swapIndex);
this.readWriteLock.writeLock().unlock();
})
.start();
}
this.readWriteLock.readLock().unlock();
} catch (InterruptedException e) {
logger.debug("Filter optimizer thread was interrupted", e);
this.readWriteLock.readLock().unlock();
}
}
}
}