improve(self-hosting): remove goose, custom migration, docs, remove zookeeper

@@ -1,7 +1,7 @@
 import { type Redis, getRedisCache, runEvery } from '@openpanel/redis';

 import { getSafeJson } from '@openpanel/common';
-import { TABLE_NAMES, ch } from '../clickhouse-client';
+import { TABLE_NAMES, ch } from '../clickhouse/client';
 import type { IClickhouseBotEvent } from '../services/event.service';
 import { BaseBuffer } from './base-buffer';

@@ -1,266 +0,0 @@
import { generateId, getSafeJson } from '@openpanel/common';
import type { ILogger } from '@openpanel/logger';
import { createLogger } from '@openpanel/logger';
import { getRedisCache } from '@openpanel/redis';
import { pathOr } from 'ramda';

export type Find<T, R = unknown> = (
  callback: (item: T) => boolean,
) => Promise<R | null>;

export type FindMany<T, R = unknown> = (
  callback: (item: T) => boolean,
) => Promise<R[]>;

export class RedisBuffer<T> {
  public name: string;
  protected prefix = 'op:buffer';
  protected bufferKey: string;
  private lockKey: string;
  protected maxBufferSize: number | null;
  protected logger: ILogger;

  constructor(bufferName: string, maxBufferSize: number | null) {
    this.name = bufferName;
    this.bufferKey = bufferName;
    this.lockKey = `lock:${bufferName}`;
    this.maxBufferSize = maxBufferSize;
    this.logger = createLogger({ name: 'buffer' }).child({
      buffer: bufferName,
    });
  }

  protected getKey(name?: string) {
    const key = `${this.prefix}:${this.bufferKey}`;
    if (name) {
      return `${key}:${name}`;
    }
    return key;
  }

  async add(item: T): Promise<void> {
    try {
      this.onAdd(item);
      await getRedisCache().rpush(this.getKey(), JSON.stringify(item));
      const bufferSize = await getRedisCache().llen(this.getKey());

      this.logger.debug(
        `Item added (${pathOr('unknown', ['id'], item)}). Current size: ${bufferSize}`,
      );

      if (this.maxBufferSize && bufferSize >= this.maxBufferSize) {
        await this.tryFlush();
      }
    } catch (error) {
      this.logger.error('Failed to add item to buffer', { error, item });
    }
  }

  public async tryFlush(): Promise<void> {
    const lockId = generateId();
    const acquired = await getRedisCache().set(
      this.lockKey,
      lockId,
      'EX',
      60,
      'NX',
    );

    if (acquired === 'OK') {
      this.logger.info(`Lock acquired. Attempting to flush. ID: ${lockId}`);
      try {
        await this.flush();
      } catch (error) {
        this.logger.error(`Failed to flush buffer. ID: ${lockId}`, { error });
      } finally {
        this.logger.info(`Releasing lock. ID: ${lockId}`);
        await this.releaseLock(lockId);
      }
    } else {
      this.logger.warn(`Failed to acquire lock. Skipping flush. ID: ${lockId}`);
    }
  }

  protected async waitForReleasedLock(
    maxWaitTime = 8000,
    checkInterval = 250,
  ): Promise<boolean> {
    const startTime = performance.now();

    while (performance.now() - startTime < maxWaitTime) {
      const lock = await getRedisCache().get(this.lockKey);
      if (!lock) {
        return true;
      }

      await new Promise((resolve) => setTimeout(resolve, checkInterval));
    }

    this.logger.warn('Timeout waiting for lock release');
    return false;
  }

  private async retryOnce(cb: () => Promise<void>) {
    try {
      await cb();
    } catch (e) {
      this.logger.error(`#1 Failed to execute callback: ${cb.name}`, e);
      await new Promise((resolve) => setTimeout(resolve, 1000));
      try {
        await cb();
      } catch (e) {
        this.logger.error(`#2 Failed to execute callback: ${cb.name}`, e);
        throw e;
      }
    }
  }

  private async flush(): Promise<void> {
    // Use a transaction to ensure atomicity
    const result = await getRedisCache()
      .multi()
      .lrange(this.getKey(), 0, -1)
      .lrange(this.getKey('backup'), 0, -1)
      .del(this.getKey())
      .exec();

    if (!result) {
      this.logger.error('No result from redis transaction', {
        result,
      });
      throw new Error('Redis transaction failed');
    }

    const lrange = result[0];
    const lrangePrevious = result[1];

    if (!lrange || lrange[0] instanceof Error) {
      this.logger.error('Error from lrange', {
        result,
      });
      throw new Error('Redis transaction failed');
    }

    const items = lrange[1] as string[];
    if (
      lrangePrevious &&
      lrangePrevious[0] === null &&
      Array.isArray(lrangePrevious[1])
    ) {
      items.push(...(lrangePrevious[1] as string[]));
    }

    const parsedItems = items
      .map((item) => getSafeJson<T | null>(item) as T | null)
      .filter((item): item is T => item !== null);

    if (parsedItems.length === 0) {
      this.logger.debug('No items to flush');
      // Clear any existing backup since we have no items to process
      await getRedisCache().del(this.getKey('backup'));
      return;
    }

    this.logger.info(`Flushing ${parsedItems.length} items`);

    try {
      // Create backup before processing
      await getRedisCache().del(this.getKey('backup')); // Clear any existing backup first
      await getRedisCache().lpush(
        this.getKey('backup'),
        ...parsedItems.map((item) => JSON.stringify(item)),
      );

      const { toInsert, toKeep } = await this.processItems(parsedItems);

      if (toInsert.length) {
        await this.retryOnce(() => this.insertIntoDB(toInsert));
        this.onInsert(toInsert);
      }

      // Add back items to keep
      if (toKeep.length > 0) {
        await getRedisCache().lpush(
          this.getKey(),
          ...toKeep.map((item) => JSON.stringify(item)),
        );
      }

      // Clear backup
      await getRedisCache().del(this.getKey('backup'));

      this.logger.info(
        `Inserted ${toInsert.length} items into DB, kept ${toKeep.length} items in buffer`,
        {
          toInsert: toInsert.length,
          toKeep: toKeep.length,
        },
      );
    } catch (error) {
      this.logger.error('Failed to process queue while flushing buffer', {
        error,
        queueSize: parsedItems.length,
      });

      if (parsedItems.length > 0) {
        // Add all items back to the buffer so nothing is lost
        this.logger.info('Adding all items back to buffer');
        await getRedisCache().lpush(
          this.getKey(),
          ...parsedItems.map((item) => JSON.stringify(item)),
        );
      }

      // Clear the backup since we're adding items back to main buffer
      await getRedisCache().del(this.getKey('backup'));
    }
  }

  private async releaseLock(lockId: string): Promise<void> {
    this.logger.debug(`Releasing lock for ${this.getKey()}`);
    const script = `
      if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("del", KEYS[1])
      else
        return 0
      end
    `;
    await getRedisCache().eval(script, 1, this.lockKey, lockId);
  }
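
  // The Lua script in releaseLock above is the standard safe-release pattern
  // for a Redis lock: the key is deleted only if it still holds our lockId,
  // so a flush that outlives the 60s lock TTL cannot delete a lock that
  // another worker has since acquired.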

  protected async getQueue(count?: number): Promise<T[]> {
    try {
      const items = await getRedisCache().lrange(this.getKey(), 0, count ?? -1);
      return items
        .map((item) => getSafeJson<T | null>(item) as T | null)
        .filter((item): item is T => item !== null);
    } catch (error) {
      this.logger.error('Failed to get queue', { error });
      return [];
    }
  }

  protected processItems(items: T[]): Promise<{ toInsert: T[]; toKeep: T[] }> {
    return Promise.resolve({ toInsert: items, toKeep: [] });
  }

  protected insertIntoDB(_items: T[]): Promise<void> {
    throw new Error('Not implemented');
  }

  protected onAdd(_item: T): void {
    // Override in subclass
  }

  protected onInsert(_items: T[]): void {
    // Override in subclass
  }

  public findMany: FindMany<T, unknown> = () => {
    return Promise.resolve([]);
  };

  public find: Find<T, unknown> = () => {
    return Promise.resolve(null);
  };
}
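
For context on what is being removed: subclasses drove RedisBuffer entirely through its protected hooks. A minimal sketch of how a consumer would have used it, with hypothetical names (EventBuffer, the TABLE_NAMES.events key, and the ./redis-buffer path are assumptions, not from this commit):

import { TABLE_NAMES, ch } from '../clickhouse/client';
import type { IClickhouseEvent } from '../services/event.service';
import { RedisBuffer } from './redis-buffer'; // assumed path of the removed file

class EventBuffer extends RedisBuffer<IClickhouseEvent> {
  constructor() {
    super('events', 1000); // add() calls tryFlush() once 1000 items are queued
  }

  protected override async insertIntoDB(items: IClickhouseEvent[]): Promise<void> {
    // Bulk-insert the flushed batch into ClickHouse
    await ch.insert({
      table: TABLE_NAMES.events,
      values: items,
      format: 'JSONEachRow',
    });
  }
}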

@@ -5,7 +5,7 @@ import {
   getRedisPub,
   runEvery,
 } from '@openpanel/redis';
-import { ch } from '../clickhouse-client';
+import { ch } from '../clickhouse/client';
 import {
   type IClickhouseEvent,
   type IServiceEvent,

@@ -3,7 +3,7 @@ import { getSafeJson } from '@openpanel/common';
 import { type Redis, getRedisCache } from '@openpanel/redis';
 import { dissocPath, mergeDeepRight, omit, whereEq } from 'ramda';

-import { TABLE_NAMES, ch, chQuery } from '../clickhouse-client';
+import { TABLE_NAMES, ch, chQuery } from '../clickhouse/client';
 import type { IClickhouseProfile } from '../services/profile.service';
 import { BaseBuffer } from './base-buffer';
 import { isPartialMatch } from './partial-json-match';

packages/db/src/clickhouse/migration.ts (new file, 454 lines)
@@ -0,0 +1,454 @@
import crypto from 'node:crypto';
import { createClient } from './client';
import { formatClickhouseDate } from './client';

interface CreateTableOptions {
  name: string;
  columns: string[];
  indices?: string[];
  engine?: string;
  orderBy: string[];
  partitionBy?: string;
  settings?: Record<string, string | number>;
  distributionHash: string;
  replicatedVersion: string;
  isClustered: boolean;
}

interface CreateMaterializedViewOptions {
  name: string;
  tableName: string;
  query: string;
  engine?: string;
  orderBy: string[];
  partitionBy?: string;
  settings?: Record<string, string | number>;
  populate?: boolean;
  distributionHash: string;
  replicatedVersion: string;
  isClustered: boolean;
}

const CLUSTER_REPLICA_PATH =
  '/clickhouse/{installation}/{cluster}/tables/{shard}/openpanel/v{replicatedVersion}/{table}';

const replicated = (tableName: string) => `${tableName}_replicated`;

export const chMigrationClient = createClient({
  url: process.env.CLICKHOUSE_URL,
  request_timeout: 3600000, // 1 hour in milliseconds
  keep_alive: {
    enabled: true,
    idle_socket_ttl: 8000,
  },
  compression: {
    request: true,
  },
  clickhouse_settings: {
    wait_end_of_query: 1,
    // Ask ClickHouse to periodically send query execution progress in HTTP headers, creating some activity in the connection.
    send_progress_in_http_headers: 1,
    // The interval of sending these progress headers. Kept below 60s so the connection is not treated as idle and dropped during long migrations.
    http_headers_progress_interval_ms: '50000',
  },
});

export function createDatabase(name: string, isClustered: boolean) {
  if (isClustered) {
    return `CREATE DATABASE IF NOT EXISTS ${name} ON CLUSTER '{cluster}'`;
  }

  return `CREATE DATABASE IF NOT EXISTS ${name}`;
}

/**
 * Creates SQL statements for table creation in ClickHouse.
 * Handles both clustered and non-clustered scenarios.
 */
export function createTable({
  name: tableName,
  columns,
  indices = [],
  engine = 'MergeTree()',
  orderBy = ['tuple()'],
  partitionBy,
  settings = {},
  distributionHash,
  replicatedVersion,
  isClustered,
}: CreateTableOptions): string[] {
  const columnDefinitions = [...columns, ...indices].join(',\n ');

  const settingsClause = Object.entries(settings).length
    ? `SETTINGS ${Object.entries(settings)
        .map(([key, value]) => `${key} = ${value}`)
        .join(', ')}`
    : '';

  const partitionByClause = partitionBy ? `PARTITION BY ${partitionBy}` : '';

  if (!isClustered) {
    // Non-clustered scenario: single table
    return [
      `CREATE TABLE IF NOT EXISTS ${tableName} (
        ${columnDefinitions}
      )
      ENGINE = ${engine}
      ${partitionByClause}
      ORDER BY (${orderBy.join(', ')})
      ${settingsClause}`.trim(),
    ];
  }

  return [
    // Local replicated table
    `CREATE TABLE IF NOT EXISTS ${replicated(tableName)} ON CLUSTER '{cluster}' (
        ${columnDefinitions}
      )
      ENGINE = Replicated${engine.replace(/^(.+?)\((.+?)?\)/, `$1('${CLUSTER_REPLICA_PATH.replace('{replicatedVersion}', replicatedVersion)}', '{replica}', $2)`).replace(/, \)$/, ')')}
      ${partitionByClause}
      ORDER BY (${orderBy.join(', ')})
      ${settingsClause}`.trim(),
    // Distributed table
    `CREATE TABLE IF NOT EXISTS ${tableName} ON CLUSTER '{cluster}' AS ${replicated(tableName)}
      ENGINE = Distributed('{cluster}', currentDatabase(), ${replicated(tableName)}, ${distributionHash})`,
  ];
}
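
// Example (assuming name = 'events', engine = 'MergeTree()',
// replicatedVersion = '1', isClustered = true): the engine rewrite above
// yields a local table `events_replicated` with
//   ENGINE = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/openpanel/v1/{table}', '{replica}')
// fronted by a Distributed table named `events` that shards rows by the
// given distributionHash expression.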

/**
 * Generates ALTER TABLE statements for adding columns
 */
export function addColumns(
  tableName: string,
  columns: string[],
  isClustered: boolean,
): string[] {
  if (isClustered) {
    return columns.map(
      (col) =>
        `ALTER TABLE ${replicated(tableName)} ON CLUSTER '{cluster}' ADD COLUMN IF NOT EXISTS ${col}`,
    );
  }

  return columns.map(
    (col) => `ALTER TABLE ${tableName} ADD COLUMN IF NOT EXISTS ${col}`,
  );
}

/**
 * Generates ALTER TABLE statements for dropping columns
 */
export function dropColumns(
  tableName: string,
  columnNames: string[],
  isClustered: boolean,
): string[] {
  if (isClustered) {
    return columnNames.map(
      (colName) =>
        `ALTER TABLE ${replicated(tableName)} ON CLUSTER '{cluster}' DROP COLUMN IF EXISTS ${colName}`,
    );
  }

  return columnNames.map(
    (colName) => `ALTER TABLE ${tableName} DROP COLUMN IF EXISTS ${colName}`,
  );
}

export async function getExistingTables() {
  try {
    const existingTablesQuery = await chMigrationClient.query({
      query: `SELECT name FROM system.tables WHERE database = 'openpanel'`,
      format: 'JSONEachRow',
    });
    return (await existingTablesQuery.json<{ name: string }>())
      .map((table) => table.name)
      .filter((table) => !table.includes('.inner_id'));
  } catch (e) {
    console.error(e);
    return [];
  }
}

export function renameTable({
  from,
  to,
  isClustered,
}: {
  from: string;
  to: string;
  isClustered: boolean;
}) {
  if (isClustered) {
    return [
      `RENAME TABLE ${replicated(from)} TO ${replicated(to)} ON CLUSTER '{cluster}'`,
      `RENAME TABLE ${from} TO ${to} ON CLUSTER '{cluster}'`,
    ];
  }

  return [`RENAME TABLE ${from} TO ${to}`];
}

export function dropTable(tableName: string, isClustered: boolean) {
  if (isClustered) {
    return `DROP TABLE IF EXISTS ${tableName} ON CLUSTER '{cluster}'`;
  }

  return `DROP TABLE IF EXISTS ${tableName}`;
}

export function moveDataBetweenTables({
  from,
  to,
  batch,
}: {
  from: string;
  to: string;
  batch?: {
    column: string;
    interval?: 'day' | 'week' | 'month';
    transform?: (date: Date) => string;
    endDate?: Date;
    startDate?: Date;
  };
}): string[] {
  const sqls: string[] = [];

  if (!batch) {
    return [`INSERT INTO ${to} SELECT * FROM ${from}`];
  }

  // Start from today and go back 3 years
  const endDate = batch.endDate || new Date();
  if (!batch.endDate) {
    endDate.setDate(endDate.getDate() + 1); // Add 1 day to include today
  }
  const startDate = batch.startDate || new Date();
  if (!batch.startDate) {
    startDate.setFullYear(startDate.getFullYear() - 3);
  }

  let currentDate = endDate;
  const interval = batch.interval || 'day';

  while (currentDate > startDate) {
    const previousDate = new Date(currentDate);

    switch (interval) {
      case 'month':
        previousDate.setMonth(previousDate.getMonth() - 1);
        break;
      case 'week':
        previousDate.setDate(previousDate.getDate() - 7);
        // Ensure we don't go below startDate
        if (previousDate < startDate) {
          previousDate.setTime(startDate.getTime());
        }
        break;
      // day
      default:
        previousDate.setDate(previousDate.getDate() - 1);
        break;
    }

    const sql = `INSERT INTO ${to}
      SELECT * FROM ${from}
      WHERE ${batch.column} > '${batch.transform ? batch.transform(previousDate) : formatClickhouseDate(previousDate, true)}'
      AND ${batch.column} <= '${batch.transform ? batch.transform(currentDate) : formatClickhouseDate(currentDate, true)}'`;
    sqls.push(sql);

    currentDate = previousDate;
  }

  return sqls;
}
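
// Example (assuming from = 'events_old', to = 'events',
// batch = { column: 'created_at' }): the loop walks backwards day by day over
// a three-year window and emits one statement per step, roughly
//   INSERT INTO events SELECT * FROM events_old
//   WHERE created_at > '<previous day>' AND created_at <= '<current day>'
// so a failed copy can be resumed by passing batch.startDate / batch.endDate.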

export function createMaterializedView({
  name: tableName,
  query,
  engine = 'AggregatingMergeTree()',
  orderBy,
  partitionBy,
  settings = {},
  populate = false,
  distributionHash = 'rand()',
  replicatedVersion,
  isClustered,
}: CreateMaterializedViewOptions): string[] {
  const settingsClause = Object.entries(settings).length
    ? `SETTINGS ${Object.entries(settings)
        .map(([key, value]) => `${key} = ${value}`)
        .join(', ')}`
    : '';

  const partitionByClause = partitionBy ? `PARTITION BY ${partitionBy}` : '';

  // Transform query to use replicated table names in clustered mode
  const transformedQuery = query.replace(/\{(\w+)\}/g, (_, tableName) =>
    isClustered ? replicated(tableName) : tableName,
  );

  if (!isClustered) {
    return [
      `CREATE MATERIALIZED VIEW IF NOT EXISTS ${tableName}
      ENGINE = ${engine}
      ${partitionByClause}
      ORDER BY (${orderBy.join(', ')})
      ${settingsClause}
      ${populate ? 'POPULATE' : ''}
      AS ${transformedQuery}`.trim(),
    ];
  }

  return [
    // Replicated materialized view
    `CREATE MATERIALIZED VIEW IF NOT EXISTS ${replicated(tableName)} ON CLUSTER '{cluster}'
      ENGINE = Replicated${engine.replace(/^(.+?)\((.+?)?\)/, `$1('${CLUSTER_REPLICA_PATH.replace('{replicatedVersion}', replicatedVersion)}', '{replica}', $2)`).replace(/, \)$/, ')')}
      ${partitionByClause}
      ORDER BY (${orderBy.join(', ')})
      ${settingsClause}
      ${populate ? 'POPULATE' : ''}
      AS ${transformedQuery}`.trim(),
    // Distributed materialized view
    `CREATE TABLE IF NOT EXISTS ${tableName} ON CLUSTER '{cluster}' AS ${replicated(tableName)}
      ENGINE = Distributed('{cluster}', currentDatabase(), ${replicated(tableName)}, ${distributionHash})`,
  ];
}

export function countRows(tableName: string) {
  return `SELECT count() FROM ${tableName}`;
}

export async function runClickhouseMigrationCommands(sqls: string[]) {
  let abort: AbortController | undefined;
  let activeQueryId: string | undefined;

  const handleTermination = async (signal: string) => {
    console.warn(
      `Received ${signal}. Cleaning up active queries before exit...`,
    );

    if (abort) {
      abort.abort();
    }
  };

  // Create bound handler functions
  const handleSigterm = () => handleTermination('SIGTERM');
  const handleSigint = () => handleTermination('SIGINT');

  // Register handlers
  process.on('SIGTERM', handleSigterm);
  process.on('SIGINT', handleSigint);

  try {
    for (const sql of sqls) {
      abort = new AbortController();
      let timer: NodeJS.Timeout | undefined;
      let resolve: ((value: unknown) => void) | undefined;
      activeQueryId = crypto.createHash('sha256').update(sql).digest('hex');

      console.log('----------------------------------------');
      console.log('---| Running query | Query ID:', activeQueryId);
      console.log('---| SQL |------------------------------');
      console.log(sql);
      console.log('----------------------------------------');

      try {
        const res = await Promise.race([
          chMigrationClient.command({
            query: sql,
            query_id: activeQueryId,
            abort_signal: abort?.signal,
          }),
          new Promise((r) => {
            resolve = r;
            let checking = false; // Add flag to prevent multiple concurrent checks

            async function check() {
              if (checking) return; // Skip if already checking
              checking = true;

              try {
                const res = await chMigrationClient
                  .query({
                    query: `SELECT
                      query_id,
                      elapsed,
                      read_rows,
                      written_rows,
                      memory_usage
                    FROM system.processes
                    WHERE query_id = '${activeQueryId}'`,
                    format: 'JSONEachRow',
                  })
                  .then((res) => res.json());

                const formatMemory = (bytes: number) => {
                  const units = ['B', 'KB', 'MB', 'GB'];
                  let size = bytes;
                  let unitIndex = 0;
                  while (size >= 1024 && unitIndex < units.length - 1) {
                    size /= 1024;
                    unitIndex++;
                  }
                  return `${Math.round(size * 100) / 100}${units[unitIndex]}`;
                };

                const formatNumber = (num: number) => {
                  return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ',');
                };

                if (Array.isArray(res) && res.length > 0) {
                  const { elapsed, read_rows, written_rows, memory_usage } =
                    res[0] as any;
                  console.log(
                    `Progress: ${elapsed.toFixed(2)}s | Memory: ${formatMemory(memory_usage)} | Read: ${formatNumber(read_rows)} rows | Written: ${formatNumber(written_rows)} rows`,
                  );
                }
              } finally {
                checking = false;
              }

              timer = setTimeout(check, 5000); // Schedule next check after current one completes
            }

            // Start the first check after 5 seconds
            timer = setTimeout(check, 5000);
          }),
        ]);

        if (timer) {
          clearTimeout(timer);
        }
        if (resolve) {
          resolve(res);
        }
      } catch (e) {
        console.log('Failed on query', sql);
        throw e;
      }
    }
  } catch (e) {
    if (abort) {
      abort.abort();
    }

    if (activeQueryId) {
      try {
        await chMigrationClient.command({
          query: `KILL QUERY WHERE query_id = '${activeQueryId}'`,
        });
        console.log(`Successfully killed query ${activeQueryId}`);
      } catch (err) {
        console.error(`Failed to kill query ${activeQueryId}:`, err);
      }
    }

    throw e;
  } finally {
    // Clean up event listeners
    process.off('SIGTERM', handleSigterm);
    process.off('SIGINT', handleSigint);
  }
}
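
Taken together, these helpers form the custom migration path that replaces goose. A minimal sketch of how a migration script might wire them up; the table definition and the CLICKHOUSE_CLUSTER flag are illustrative assumptions, not part of this commit:

import {
  chMigrationClient,
  createDatabase,
  createTable,
  runClickhouseMigrationCommands,
} from './migration';

async function migrate() {
  const isClustered = process.env.CLICKHOUSE_CLUSTER === 'true'; // hypothetical flag
  const sqls = [
    createDatabase('openpanel', isClustered),
    ...createTable({
      name: 'events', // hypothetical table
      columns: ['project_id String', 'created_at DateTime'],
      orderBy: ['project_id', 'created_at'],
      distributionHash: 'cityHash64(project_id)',
      replicatedVersion: '1',
      isClustered,
    }),
  ];
  // Runs each statement sequentially with progress logging and cleanup on failure
  await runClickhouseMigrationCommands(sqls);
  await chMigrationClient.close();
}

migrate().catch((e) => {
  console.error(e);
  process.exit(1);
});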

@@ -13,7 +13,7 @@ import {
   TABLE_NAMES,
   formatClickhouseDate,
   toDate,
-} from '../clickhouse-client';
+} from '../clickhouse/client';
 import { createSqlBuilder } from '../sql-builder';

 export function transformPropertyKey(property: string) {

@@ -12,7 +12,7 @@ import {
   chQuery,
   convertClickhouseDateToJs,
   formatClickhouseDate,
-} from '../clickhouse-client';
+} from '../clickhouse/client';
 import type { EventMeta, Prisma } from '../prisma-client';
 import { db } from '../prisma-client';
 import { createSqlBuilder } from '../sql-builder';

@@ -143,6 +143,10 @@ export async function connectUserToOrganization({
     throw new Error('Invite not found');
   }

+  if (process.env.ALLOW_INVITATION === 'false') {
+    throw new Error('Invitations are not allowed');
+  }
+
   if (invite.expiresAt < new Date()) {
     throw new Error('Invite expired');
   }
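
The new guard lets self-hosters turn off organization invites. Presumably it is driven by a line in the deployment's environment file; the variable name is taken from the check above, and the string comparison means only the literal value 'false' disables invites:

ALLOW_INVITATION=false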

@@ -11,7 +11,7 @@ import {
   ch,
   chQuery,
   formatClickhouseDate,
-} from '../clickhouse-client';
+} from '../clickhouse/client';
 import { createSqlBuilder } from '../sql-builder';

 export type IProfileMetrics = {

@@ -1,6 +1,6 @@
 import { escape } from 'sqlstring';

-import { TABLE_NAMES, chQuery } from '../clickhouse-client';
+import { TABLE_NAMES, chQuery } from '../clickhouse/client';

 type IGetWeekRetentionInput = {
   projectId: string;

@@ -1,4 +1,4 @@
-import { TABLE_NAMES } from './clickhouse-client';
+import { TABLE_NAMES } from './clickhouse/client';

 export interface SqlBuilderObject {
   where: Record<string, string>;