chore(dashboard,db): prepping for migration time

This commit is contained in:
Carl-Gerhard Lindesvärd
2024-11-28 20:58:18 +01:00
parent 9ffa213fc2
commit ab8bce7752
5 changed files with 56 additions and 7 deletions

View File

@@ -21,7 +21,7 @@ export async function healthcheck(
request: FastifyRequest,
reply: FastifyReply,
) {
const redisRes = await withTimings(getRedisCache().keys('keys op:buffer:*'));
const redisRes = await withTimings(getRedisCache().keys('op:buffer:*'));
const dbRes = await withTimings(db.project.findFirst());
const queueRes = await withTimings(eventsQueue.getCompleted());
const chRes = await withTimings(

View File

@@ -0,0 +1,18 @@
import { CalendarCogIcon } from 'lucide-react';
export default function Maintenance() {
  // Full-screen placeholder page shown while the app is in planned maintenance.
  const titleGradient =
    'text-[150px] font-mono font-bold -mb-16 leading-[1] select-none pointer-events-none whitespace-nowrap bg-gradient-to-b from-def-300 to-def-100 bg-clip-text text-transparent';

  return (
    <div className="h-screen w-full center-center overflow-hidden">
      <div className="relative z-10 col gap-2 center-center">
        <CalendarCogIcon className="size-32 mb-4 animate-wiggle text-def-300" />
        <div className={titleGradient}>Oh no!</div>
        <h1 className="text-6xl font-bold">Maintenance</h1>
        <p className="text-xl text-muted-foreground">
          We&apos;re doing a planned maintenance. Please check back later.
        </p>
      </div>
    </div>
  );
}

View File

@@ -1,4 +1,5 @@
import { clerkMiddleware, createRouteMatcher } from '@clerk/nextjs/server';
import { NextResponse } from 'next/server';
// This example protects all routes including api/trpc routes
// Please edit this to allow other routes to be public as needed.
@@ -13,6 +14,9 @@ const isPublicRoute = createRouteMatcher([
export default clerkMiddleware(
(auth, req) => {
if (!process.env.MAINTENANCE_MODE && !req.url.includes('/maintenance')) {
return NextResponse.redirect(new URL('/maintenance', req.url), 307);
}
if (!isPublicRoute(req)) {
auth().protect();
}

View File

@@ -4,7 +4,12 @@ import { toDots } from '@openpanel/common';
import { getRedisCache } from '@openpanel/redis';
import { escape } from 'sqlstring';
import { TABLE_NAMES, ch, chQuery } from '../clickhouse-client';
import {
TABLE_NAMES,
ch,
chQuery,
formatClickhouseDate,
} from '../clickhouse-client';
import { transformProfile } from '../services/profile.service';
import type {
IClickhouseProfile,
@@ -23,6 +28,15 @@ export class ProfileBuffer extends RedisBuffer<BufferType> {
super(TABLE_NAMES.profiles, BATCH_SIZE);
}
protected transformProfiles(profiles: IClickhouseProfile[]): BufferType[] {
  // Normalize created_at into ClickHouse's date string format; profiles
  // without a created_at get an empty string rather than null/undefined.
  return profiles.map((profile) => {
    const created_at = profile.created_at
      ? formatClickhouseDate(profile.created_at)
      : '';
    return { ...profile, created_at };
  });
}
// this will do a couple of things:
// - we slice the queue to maxBufferSize since these queries have a limit on character count
// - check redis cache for profiles
@@ -40,12 +54,14 @@ export class ProfileBuffer extends RedisBuffer<BufferType> {
slicedQueue.filter((_, index) => !redisProfiles[index]),
);
const toInsert = this.createProfileValues(
const profiles = this.createProfileValues(
slicedQueue,
redisProfiles,
dbProfiles,
);
const toInsert = this.transformProfiles(profiles);
if (toInsert.length > 0) {
await this.updateRedisCache(toInsert);
}

View File

@@ -2,9 +2,12 @@ import type { ResponseJSON } from '@clickhouse/client';
import { createClient } from '@clickhouse/client';
import { escape } from 'sqlstring';
import type { NodeClickHouseClientConfigOptions } from '@clickhouse/client/dist/config';
import { createLogger } from '@openpanel/logger';
import type { IInterval } from '@openpanel/validation';
export { createClient };
const logger = createLogger({ name: 'clickhouse' });
export const TABLE_NAMES = {
@@ -19,8 +22,7 @@ export const TABLE_NAMES = {
cohort_events_mv: 'cohort_events_mv',
};
export const originalCh = createClient({
url: process.env.CLICKHOUSE_URL,
export const CLICKHOUSE_OPTIONS: NodeClickHouseClientConfigOptions = {
max_open_connections: 30,
request_timeout: 30000,
keep_alive: {
@@ -33,14 +35,23 @@ export const originalCh = createClient({
clickhouse_settings: {
date_time_input_format: 'best_effort',
},
};
// Raw ClickHouse client instance; wrapped by the `ch` Proxy below for logging.
// NOTE: CLICKHOUSE_OPTIONS is spread AFTER `url`, so any `url` key added to
// the shared options in the future would silently override this one.
export const originalCh = createClient({
  // TODO: remove this after migration
  // Prefer the cluster URL while the migration is in progress; fall back to
  // the original single-node CLICKHOUSE_URL when the cluster var is unset.
  url: process.env.CLICKHOUSE_URL_CLUSTER ?? process.env.CLICKHOUSE_URL,
  ...CLICKHOUSE_OPTIONS,
});
// Collapse a (possibly multi-line) SQL string into one space-normalized,
// trimmed line so log entries stay compact and grep-able.
const cleanQuery = (query: string): string => {
  const singleLine = query.replace(/\n/g, '');
  return singleLine.replace(/\s+/g, ' ').trim();
};
export const ch = new Proxy(originalCh, {
get(target, property, receiver) {
if (property === 'insert' || property === 'query') {
return async (...args: any[]) => {
const childLogger = logger.child({
query: args[0].query,
query: cleanQuery(args[0].query),
property,
});
try {
@@ -113,7 +124,7 @@ export async function chQueryWithMeta<T extends Record<string, any>>(
};
logger.info('query info', {
query,
query: cleanQuery(query),
rows: json.rows,
stats: response.statistics,
elapsed: Date.now() - start,