fix: overall perf improvements

* fix: ignore private IPs
* fix: performance-related fixes
* fix: simplify event buffer
* fix: default to 1 events queue shard
* add: cleanup scripts
* fix: comments
* fix: groupmq
* fix: sync cachable
* remove cluster names and put them behind an env flag (if someone wants to scale)
* better logger
* remove reqid and user agent
* fix lock
* remove wait_for_async_insert
Committed by GitHub
parent 38cc53890a
commit da59622dce
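Several of the items above boil down to the same pattern the diffs below use throughout: replace a hardcoded value with an environment-driven setting and a conservative default. A minimal TypeScript sketch of that pattern; the variable names EVENTS_QUEUE_SHARDS and CLICKHOUSE_CLUSTER are illustrative assumptions, not names taken from this commit:

// Illustrative sketch only; the env variable names are assumptions.
const queueShards = process.env.EVENTS_QUEUE_SHARDS
  ? Number.parseInt(process.env.EVENTS_QUEUE_SHARDS, 10)
  : 1; // default to a single events queue shard

// Cluster names stay opt-in: only applied when the flag is set, so
// single-node deployments keep plain table names and scaling remains opt-in.
const onCluster = process.env.CLICKHOUSE_CLUSTER
  ? `ON CLUSTER '${process.env.CLICKHOUSE_CLUSTER}'`
  : '';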
@@ -8,18 +8,21 @@ export class BaseBuffer {
   lockKey: string;
   lockTimeout = 60;
   onFlush: () => void;
+  enableParallelProcessing: boolean;
 
   protected bufferCounterKey: string;
 
   constructor(options: {
     name: string;
     onFlush: () => Promise<void>;
+    enableParallelProcessing?: boolean;
   }) {
     this.logger = createLogger({ name: options.name });
     this.name = options.name;
     this.lockKey = `lock:${this.name}`;
     this.onFlush = options.onFlush;
     this.bufferCounterKey = `${this.name}:buffer:count`;
+    this.enableParallelProcessing = options.enableParallelProcessing ?? false;
   }
 
   protected chunks<T>(items: T[], size: number) {
@@ -91,6 +94,26 @@ export class BaseBuffer {
 
   async tryFlush() {
     const now = performance.now();
+
+    // Parallel mode: No locking, multiple workers can process simultaneously
+    if (this.enableParallelProcessing) {
+      try {
+        this.logger.debug('Processing buffer (parallel mode)...');
+        await this.onFlush();
+        this.logger.debug('Flush completed (parallel mode)', {
+          elapsed: performance.now() - now,
+        });
+      } catch (error) {
+        this.logger.error('Failed to process buffer (parallel mode)', {
+          error,
+        });
+        // In parallel mode, we can't safely reset counter as other workers might be active
+        // Counter will be resynced automatically by the periodic job
+      }
+      return;
+    }
+
+    // Sequential mode: Use lock to ensure only one worker processes at a time
     const lockId = generateSecureId('lock');
     const acquired = await getRedisCache().set(
       this.lockKey,
@@ -101,7 +124,7 @@ export class BaseBuffer {
     );
     if (acquired === 'OK') {
       try {
-        this.logger.info('Acquired lock. Processing buffer...', {
+        this.logger.debug('Acquired lock. Processing buffer...', {
           lockId,
         });
         await this.onFlush();
@@ -117,7 +140,7 @@ export class BaseBuffer {
         }
       } finally {
         await this.releaseLock(lockId);
-        this.logger.info('Flush completed', {
+        this.logger.debug('Flush completed', {
           elapsed: performance.now() - now,
           lockId,
         });
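tryFlush now has two paths: when enableParallelProcessing is set, workers flush concurrently with no locking at all; otherwise the existing Redis lock keeps flushes sequential. The lock acquisition and release are only partially visible in these hunks, so the following is an approximate ioredis sketch of the sequential path; the compare-and-delete release is an assumption about what releaseLock(lockId) does, not something confirmed by the diff.

import Redis from 'ioredis';

// Approximate sketch of the sequential flush path (not the project's verbatim code).
async function flushWithLock(
  redis: Redis,
  lockKey: string,
  lockTimeoutSec: number,
  onFlush: () => Promise<void>,
) {
  const lockId = `lock_${Math.random().toString(36).slice(2)}`; // stand-in for generateSecureId('lock')
  // SET ... PX <ms> NX: at most one worker acquires the lock per flush window.
  const acquired = await redis.set(lockKey, lockId, 'PX', lockTimeoutSec * 1000, 'NX');
  if (acquired !== 'OK') {
    return; // another worker is already flushing
  }
  try {
    await onFlush();
  } finally {
    // Assumed behavior of releaseLock: delete the key only if we still own it.
    await redis.eval(
      "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end",
      1,
      lockKey,
      lockId,
    );
  }
}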
@@ -71,7 +71,7 @@ export class BotBuffer extends BaseBuffer {
         .decrby(this.bufferCounterKey, events.length)
         .exec();
 
-      this.logger.info('Processed bot events', {
+      this.logger.debug('Processed bot events', {
         count: events.length,
       });
     } catch (error) {
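The pipeline at the top of this hunk keeps the `<name>:buffer:count` key in step with the Redis list: the entries that were just flushed are removed and the counter is decremented in one MULTI/EXEC. A small sketch of that bookkeeping; the LTRIM step is an assumption, since the hunk only shows the DECRBY and EXEC calls.

import Redis from 'ioredis';

// Sketch of the counter bookkeeping used after a successful flush.
async function acknowledgeBatch(
  redis: Redis,
  listKey: string,    // the buffer's Redis list
  counterKey: string, // e.g. `${name}:buffer:count`
  count: number,      // number of entries just written to ClickHouse
) {
  await redis
    .multi()
    .ltrim(listKey, count, -1) // assumed: drop the entries that were flushed
    .decrby(counterKey, count) // keep the approximate buffer counter in sync
    .exec();
}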
(Two file diffs suppressed because they are too large.)
@@ -12,12 +12,12 @@ export class ProfileBuffer extends BaseBuffer {
   private batchSize = process.env.PROFILE_BUFFER_BATCH_SIZE
     ? Number.parseInt(process.env.PROFILE_BUFFER_BATCH_SIZE, 10)
     : 200;
-  private daysToKeep = process.env.PROFILE_BUFFER_DAYS_TO_KEEP
-    ? Number.parseInt(process.env.PROFILE_BUFFER_DAYS_TO_KEEP, 10)
-    : 7;
   private chunkSize = process.env.PROFILE_BUFFER_CHUNK_SIZE
     ? Number.parseInt(process.env.PROFILE_BUFFER_CHUNK_SIZE, 10)
     : 1000;
+  private ttlInSeconds = process.env.PROFILE_BUFFER_TTL_IN_SECONDS
+    ? Number.parseInt(process.env.PROFILE_BUFFER_TTL_IN_SECONDS, 10)
+    : 60 * 60;
 
   private readonly redisKey = 'profile-buffer';
   private readonly redisProfilePrefix = 'profile-cache:';
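ProfileBuffer's tunables are read from the environment with inline defaults (batch size 200, chunk size 1000, cache TTL 3600 seconds). The repeated `process.env.X ? Number.parseInt(X, 10) : fallback` expressions could equally be factored into a tiny helper; the one below is purely illustrative, the project defines the fields inline as shown above.

// Illustrative helper only; not part of this commit.
function envInt(name: string, fallback: number): number {
  const raw = process.env[name];
  const parsed = raw ? Number.parseInt(raw, 10) : Number.NaN;
  return Number.isNaN(parsed) ? fallback : parsed;
}

// e.g. envInt('PROFILE_BUFFER_BATCH_SIZE', 200)
//      envInt('PROFILE_BUFFER_TTL_IN_SECONDS', 60 * 60)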
@@ -49,7 +49,7 @@ export class ProfileBuffer extends BaseBuffer {
       profileId: profile.id,
       projectId: profile.project_id,
     });
-    return (await getRedisCache().exists(cacheKey)) === 1;
+    return (await this.redis.exists(cacheKey)) === 1;
   }
 
   async add(profile: IClickhouseProfile, isFromEvent = false) {
@@ -90,9 +90,6 @@ export class ProfileBuffer extends BaseBuffer {
         profile,
       });
 
-      const cacheTtl = profile.is_external
-        ? 60 * 60 * 24 * this.daysToKeep
-        : 60 * 60; // 1 hour for internal profiles
       const cacheKey = this.getProfileCacheKey({
         profileId: profile.id,
         projectId: profile.project_id,
@@ -100,7 +97,7 @@ export class ProfileBuffer extends BaseBuffer {
 
       const result = await this.redis
         .multi()
-        .set(cacheKey, JSON.stringify(mergedProfile), 'EX', cacheTtl)
+        .set(cacheKey, JSON.stringify(mergedProfile), 'EX', this.ttlInSeconds)
         .rpush(this.redisKey, JSON.stringify(mergedProfile))
         .incr(this.bufferCounterKey)
         .llen(this.redisKey)
@@ -120,7 +117,6 @@ export class ProfileBuffer extends BaseBuffer {
         batchSize: this.batchSize,
       });
       if (bufferLength >= this.batchSize) {
-        this.logger.info('Buffer full, initiating flush');
         await this.tryFlush();
       }
     } catch (error) {
@@ -137,18 +133,33 @@ export class ProfileBuffer extends BaseBuffer {
       projectId: profile.project_id,
     });
 
-    const existingProfile = await getRedisCache().get(cacheKey);
+    const existingProfile = await this.fetchFromCache(
+      profile.id,
+      profile.project_id,
+    );
     if (existingProfile) {
-      const parsedProfile = getSafeJson<IClickhouseProfile>(existingProfile);
-      if (parsedProfile) {
-        logger.debug('Profile found in Redis');
-        return parsedProfile;
-      }
+      logger.debug('Profile found in Redis');
+      return existingProfile;
     }
 
     return this.fetchFromClickhouse(profile, logger);
   }
 
+  public async fetchFromCache(
+    profileId: string,
+    projectId: string,
+  ): Promise<IClickhouseProfile | null> {
+    const cacheKey = this.getProfileCacheKey({
+      profileId,
+      projectId,
+    });
+    const existingProfile = await this.redis.get(cacheKey);
+    if (!existingProfile) {
+      return null;
+    }
+    return getSafeJson<IClickhouseProfile>(existingProfile);
+  }
+
   private async fetchFromClickhouse(
     profile: IClickhouseProfile,
     logger: ILogger,
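With fetchFromCache public and returning an already parsed profile (or null), callers get a straightforward cache-aside read: a Redis hit short-circuits, a miss falls through to ClickHouse. A rough sketch of that flow; the type stub and the wiring are illustrative, not the project's actual call site.

// Stand-in type; the real IClickhouseProfile lives in the project.
type IClickhouseProfile = Record<string, unknown>;

// Cache-aside read sketch built on the methods added/changed above.
async function getProfileCacheAside(
  fetchFromCache: (profileId: string, projectId: string) => Promise<IClickhouseProfile | null>,
  fetchFromClickhouse: () => Promise<IClickhouseProfile | null>,
  profileId: string,
  projectId: string,
): Promise<IClickhouseProfile | null> {
  const cached = await fetchFromCache(profileId, projectId);
  if (cached) {
    return cached; // Redis hit: no JSON parsing needed at the call site
  }
  return fetchFromClickhouse(); // miss: fall back to ClickHouse
}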
@@ -176,7 +187,7 @@ export class ProfileBuffer extends BaseBuffer {
 
   async processBuffer() {
     try {
-      this.logger.info('Starting profile buffer processing');
+      this.logger.debug('Starting profile buffer processing');
       const profiles = await this.redis.lrange(
         this.redisKey,
         0,
@@ -188,7 +199,7 @@ export class ProfileBuffer extends BaseBuffer {
         return;
       }
 
-      this.logger.info(`Processing ${profiles.length} profiles in buffer`);
+      this.logger.debug(`Processing ${profiles.length} profiles in buffer`);
       const parsedProfiles = profiles.map((p) =>
         getSafeJson<IClickhouseProfile>(p),
       );
@@ -208,7 +219,7 @@ export class ProfileBuffer extends BaseBuffer {
         .decrby(this.bufferCounterKey, profiles.length)
         .exec();
 
-      this.logger.info('Successfully completed profile processing', {
+      this.logger.debug('Successfully completed profile processing', {
         totalProfiles: profiles.length,
       });
     } catch (error) {
@@ -12,6 +12,9 @@ export class SessionBuffer extends BaseBuffer {
   private batchSize = process.env.SESSION_BUFFER_BATCH_SIZE
     ? Number.parseInt(process.env.SESSION_BUFFER_BATCH_SIZE, 10)
     : 1000;
+  private chunkSize = process.env.SESSION_BUFFER_CHUNK_SIZE
+    ? Number.parseInt(process.env.SESSION_BUFFER_CHUNK_SIZE, 10)
+    : 1000;
 
   private readonly redisKey = 'session-buffer';
   private redis: Redis;
@@ -209,7 +212,7 @@ export class SessionBuffer extends BaseBuffer {
         };
       });
 
-      for (const chunk of this.chunks(sessions, 1000)) {
+      for (const chunk of this.chunks(sessions, this.chunkSize)) {
         // Insert to ClickHouse
         await ch.insert({
           table: TABLE_NAMES.sessions,
@@ -225,7 +228,7 @@ export class SessionBuffer extends BaseBuffer {
         .decrby(this.bufferCounterKey, events.length);
       await multi.exec();
 
-      this.logger.info('Processed sessions', {
+      this.logger.debug('Processed sessions', {
         count: events.length,
       });
     } catch (error) {
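Session inserts are chunked with BaseBuffer's chunks<T> helper, now sized by SESSION_BUFFER_CHUNK_SIZE instead of a hardcoded 1000. The helper's body is outside this diff; a plausible implementation is sketched below (an assumption, not the project's verbatim code).

// Plausible shape of BaseBuffer.chunks<T>; the real implementation is not shown in this diff.
function chunks<T>(items: T[], size: number): T[][] {
  const out: T[][] = [];
  for (let i = 0; i < items.length; i += size) {
    out.push(items.slice(i, i + size));
  }
  return out;
}

// Usage mirroring the hunk above:
// for (const chunk of chunks(sessions, chunkSize)) { await ch.insert({ ...params, values: chunk }); }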