-
Notifications
You must be signed in to change notification settings - Fork 185
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat(server): server info lookup cache #3808
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -9,11 +9,18 @@ import { | |
getServerInfoFactory, | ||
updateServerInfoFactory, | ||
getPublicRolesFactory, | ||
getPublicScopesFactory | ||
getPublicScopesFactory, | ||
getServerInfoFromCacheFactory, | ||
storeServerInfoInCacheFactory | ||
} from '@/modules/core/repositories/server' | ||
import { db } from '@/db/knex' | ||
import { Resolvers } from '@/modules/core/graph/generated/graphql' | ||
import { LRUCache } from 'lru-cache' | ||
import { ServerInfo } from '@/modules/core/helpers/types' | ||
|
||
const cache = new LRUCache<string, ServerInfo>({ max: 1, ttl: 60 * 1000 }) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Defining it here means we will have a separate cache (and a potentially different response) everywhere that getServerInfo is called. Better to move this into a shared location. |
||
const getServerInfoFromCache = getServerInfoFromCacheFactory({ cache }) | ||
const storeServerInfoInCache = storeServerInfoInCacheFactory({ cache }) | ||
const getServerInfo = getServerInfoFactory({ db }) | ||
const updateServerInfo = updateServerInfoFactory({ db }) | ||
const getPublicRoles = getPublicRolesFactory({ db }) | ||
|
@@ -22,7 +29,11 @@ const getPublicScopes = getPublicScopesFactory({ db }) | |
export = { | ||
Query: { | ||
async serverInfo() { | ||
return await getServerInfo() | ||
const cachedServerInfo = getServerInfoFromCache() | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why would we place this complexity in the caller? All callers will now have to implement the caching get/set calls. |
||
if (cachedServerInfo) return cachedServerInfo | ||
const serverInfo = await getServerInfo() | ||
storeServerInfoInCache({ serverInfo }) | ||
return serverInfo | ||
} | ||
}, | ||
ServerInfo: { | ||
|
@@ -58,6 +69,9 @@ export = { | |
|
||
const update = removeNullOrUndefinedKeys(args.info) | ||
await updateServerInfo(update) | ||
// we're currently going to ignore that this should be propagated to all | ||
// backend instances, and going to rely on the TTL in the cache to propagate the changes | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We spoke last week of using Redis to minimise split-brain effects. What changed? |
||
cache.clear() | ||
return true | ||
}, | ||
serverInfoMutations: () => ({}) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -18,6 +18,7 @@ import { | |
getServerVersion | ||
} from '@/modules/shared/helpers/envHelper' | ||
import { Knex } from 'knex' | ||
import { LRUCache } from 'lru-cache' | ||
|
||
const ServerConfig = buildTableHelper('server_config', [ | ||
'id', | ||
|
@@ -38,6 +39,21 @@ const tables = { | |
scopes: (db: Knex) => db<ScopeRecord>(Scopes.name) | ||
} | ||
|
||
const SERVER_CONFIG_CACHE_KEY = 'server_config' | ||
|
||
export const getServerInfoFromCacheFactory = | ||
({ cache }: { cache: LRUCache<string, ServerInfo> }) => | ||
() => { | ||
const serverInfo = cache.get(SERVER_CONFIG_CACHE_KEY) | ||
return serverInfo ?? null | ||
} | ||
|
||
export const storeServerInfoInCacheFactory = | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. There are no guarantees that |
||
({ cache }: { cache: LRUCache<string, ServerInfo> }) => | ||
({ serverInfo }: { serverInfo: ServerInfo }) => { | ||
cache.set(SERVER_CONFIG_CACHE_KEY, serverInfo) | ||
} | ||
|
||
export const getServerInfoFactory = | ||
(deps: { db: Knex }): GetServerInfo => | ||
async () => { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why use lru-cache if all we care about is TTL?
From lru-cache docs: