diff --git a/apps/api/src/controllers/healthcheck.controller.ts b/apps/api/src/controllers/healthcheck.controller.ts index 0e9d8094..a9a18957 100644 --- a/apps/api/src/controllers/healthcheck.controller.ts +++ b/apps/api/src/controllers/healthcheck.controller.ts @@ -21,7 +21,7 @@ export async function healthcheck( request: FastifyRequest, reply: FastifyReply, ) { - const redisRes = await withTimings(getRedisCache().keys('keys op:buffer:*')); + const redisRes = await withTimings(getRedisCache().keys('op:buffer:*')); const dbRes = await withTimings(db.project.findFirst()); const queueRes = await withTimings(eventsQueue.getCompleted()); const chRes = await withTimings( diff --git a/apps/dashboard/src/app/maintenance/page.tsx b/apps/dashboard/src/app/maintenance/page.tsx new file mode 100644 index 00000000..4532112d --- /dev/null +++ b/apps/dashboard/src/app/maintenance/page.tsx @@ -0,0 +1,18 @@ +import { CalendarCogIcon } from 'lucide-react'; + +export default function Maintenance() { + return ( +
+
+ +
+ Oh no! +
+

Maintenance

+

+ We're doing a planned maintenance. Please check back later. +

+
+
+ ); +} diff --git a/apps/dashboard/src/middleware.ts b/apps/dashboard/src/middleware.ts index c907252f..1f8e7f61 100644 --- a/apps/dashboard/src/middleware.ts +++ b/apps/dashboard/src/middleware.ts @@ -1,4 +1,5 @@ import { clerkMiddleware, createRouteMatcher } from '@clerk/nextjs/server'; +import { NextResponse } from 'next/server'; // This example protects all routes including api/trpc routes // Please edit this to allow other routes to be public as needed. @@ -13,6 +14,9 @@ const isPublicRoute = createRouteMatcher([ export default clerkMiddleware( (auth, req) => { + if (process.env.MAINTENANCE_MODE && !req.url.includes('/maintenance')) { + return NextResponse.redirect(new URL('/maintenance', req.url), 307); + } if (!isPublicRoute(req)) { auth().protect(); } diff --git a/packages/db/src/buffers/profile-buffer.ts b/packages/db/src/buffers/profile-buffer.ts index da33e596..a7973069 100644 --- a/packages/db/src/buffers/profile-buffer.ts +++ b/packages/db/src/buffers/profile-buffer.ts @@ -4,7 +4,12 @@ import { toDots } from '@openpanel/common'; import { getRedisCache } from '@openpanel/redis'; import { escape } from 'sqlstring'; -import { TABLE_NAMES, ch, chQuery } from '../clickhouse-client'; +import { + TABLE_NAMES, + ch, + chQuery, + formatClickhouseDate, +} from '../clickhouse-client'; import { transformProfile } from '../services/profile.service'; import type { IClickhouseProfile, @@ -23,6 +28,15 @@ export class ProfileBuffer extends RedisBuffer { super(TABLE_NAMES.profiles, BATCH_SIZE); } + protected transformProfiles(profiles: IClickhouseProfile[]): BufferType[] { + return profiles.map((profile) => ({ + ...profile, + created_at: profile.created_at + ? 
formatClickhouseDate(profile.created_at) + : '', + })); + } + // this will do a couple of things: // - we slice the queue to maxBufferSize since this queries have a limit on character count // - check redis cache for profiles @@ -40,12 +54,14 @@ export class ProfileBuffer extends RedisBuffer { slicedQueue.filter((_, index) => !redisProfiles[index]), ); - const toInsert = this.createProfileValues( + const profiles = this.createProfileValues( slicedQueue, redisProfiles, dbProfiles, ); + const toInsert = this.transformProfiles(profiles); + if (toInsert.length > 0) { await this.updateRedisCache(toInsert); } diff --git a/packages/db/src/clickhouse-client.ts b/packages/db/src/clickhouse-client.ts index 2d7413f8..5b8b67a1 100644 --- a/packages/db/src/clickhouse-client.ts +++ b/packages/db/src/clickhouse-client.ts @@ -2,9 +2,12 @@ import type { ResponseJSON } from '@clickhouse/client'; import { createClient } from '@clickhouse/client'; import { escape } from 'sqlstring'; +import type { NodeClickHouseClientConfigOptions } from '@clickhouse/client/dist/config'; import { createLogger } from '@openpanel/logger'; import type { IInterval } from '@openpanel/validation'; +export { createClient }; + const logger = createLogger({ name: 'clickhouse' }); export const TABLE_NAMES = { @@ -19,8 +22,7 @@ export const TABLE_NAMES = { cohort_events_mv: 'cohort_events_mv', }; -export const originalCh = createClient({ - url: process.env.CLICKHOUSE_URL, +export const CLICKHOUSE_OPTIONS: NodeClickHouseClientConfigOptions = { max_open_connections: 30, request_timeout: 30000, keep_alive: { @@ -33,14 +35,23 @@ export const originalCh = createClient({ clickhouse_settings: { date_time_input_format: 'best_effort', }, +}; + +export const originalCh = createClient({ + // TODO: remove this after migration + url: process.env.CLICKHOUSE_URL_CLUSTER ?? 
process.env.CLICKHOUSE_URL, + ...CLICKHOUSE_OPTIONS, }); +const cleanQuery = (query: string) => + query.replace(/\n/g, '').replace(/\s+/g, ' ').trim(); + export const ch = new Proxy(originalCh, { get(target, property, receiver) { if (property === 'insert' || property === 'query') { return async (...args: any[]) => { const childLogger = logger.child({ - query: args[0].query, + query: cleanQuery(args[0].query), property, }); try { @@ -113,7 +124,7 @@ export async function chQueryWithMeta>( }; logger.info('query info', { - query, + query: cleanQuery(query), rows: json.rows, stats: response.statistics, elapsed: Date.now() - start,