From 4c88b25844e50ea18d660073f01a7aa6b116179e Mon Sep 17 00:00:00 2001
From: Mini256
Date: Wed, 14 Jun 2023 16:41:28 +0800
Subject: [PATCH] example: add tidb example

---
 .github/workflows/test-tidb.yaml    |  43 +++
 databases/tidb/.env.example         |  10 +
 databases/tidb/.gitignore           |   7 +
 databases/tidb/README.md            | 143 ++++++++
 databases/tidb/config/pd.toml       |  86 +++++
 databases/tidb/config/tidb.toml     | 233 +++++++++++++
 databases/tidb/config/tikv.toml     | 499 ++++++++++++++++++++++++++++
 databases/tidb/docker-compose.yml   |  58 ++++
 databases/tidb/jest.config.js       |   5 +
 databases/tidb/package.json         |  28 ++
 databases/tidb/prisma/schema.prisma |  45 +++
 databases/tidb/src/script.ts        | 262 +++++++++++++++
 databases/tidb/tests/prisma.test.ts | 295 ++++++++++++++++
 databases/tidb/tsconfig.json        |  13 +
 14 files changed, 1727 insertions(+)
 create mode 100644 .github/workflows/test-tidb.yaml
 create mode 100644 databases/tidb/.env.example
 create mode 100644 databases/tidb/.gitignore
 create mode 100644 databases/tidb/README.md
 create mode 100644 databases/tidb/config/pd.toml
 create mode 100644 databases/tidb/config/tidb.toml
 create mode 100644 databases/tidb/config/tikv.toml
 create mode 100644 databases/tidb/docker-compose.yml
 create mode 100644 databases/tidb/jest.config.js
 create mode 100644 databases/tidb/package.json
 create mode 100644 databases/tidb/prisma/schema.prisma
 create mode 100644 databases/tidb/src/script.ts
 create mode 100644 databases/tidb/tests/prisma.test.ts
 create mode 100644 databases/tidb/tsconfig.json

diff --git a/.github/workflows/test-tidb.yaml b/.github/workflows/test-tidb.yaml
new file mode 100644
index 000000000000..bc7176f76f5f
--- /dev/null
+++ b/.github/workflows/test-tidb.yaml
@@ -0,0 +1,43 @@
+name: test-tidb
+on:
+  push:
+    paths:
+      - databases/tidb/**
+    branches:
+      - latest
+      - dev
+      - patch-dev
+  pull_request:
+    paths:
+      - databases/tidb/**
+
+env:
+  CI: 1
+  PRISMA_TELEMETRY_INFORMATION: 'prisma-examples test-tidb.yaml'
+
+jobs:
+  test:
+    defaults:
+      run:
+        working-directory: databases/tidb
+    runs-on: ubuntu-latest
+
+    env:
+      DATABASE_URL: mysql://root@127.0.0.1:4000/prisma?sslmode=disable
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Start a single TiDB instance with Docker
+        env:
+          TIDB_DOCKER_TAG: 'pingcap/tidb:v7.1.0'
+        run: |
+          docker pull $TIDB_DOCKER_TAG
+          docker run -d --name tidb --hostname tidb -p 4000:4000 -p 8080:8080 -v "${{ github.workspace }}:/app" $TIDB_DOCKER_TAG
+          sudo apt update && sudo apt install wait-for-it -y
+          wait-for-it -h localhost -p 4000
+      - uses: actions/setup-node@v3
+        with:
+          node-version: '14'
+      - run: npm install
+      - run: npx prisma migrate dev --name "init"
+      - run: npm run test
diff --git a/databases/tidb/.env.example b/databases/tidb/.env.example
new file mode 100644
index 000000000000..1f298bc50b08
--- /dev/null
+++ b/databases/tidb/.env.example
@@ -0,0 +1,10 @@
+# If you connect to a local TiDB cluster, set the DATABASE_URL variable in the following format.
+#
+# DATABASE_URL="mysql://<USERNAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>"
+#
+# For example:
+#
+DATABASE_URL="mysql://root@localhost:4000/test"
+
+# If you connect to a TiDB Serverless cluster, you need to add `sslaccept=strict` to the end of the connection string.
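+# You can find the connection information for your TiDB Serverless cluster in the TiDB Cloud console.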
+# DATABASE_URL="mysql://:@:/?sslaccept=strict diff --git a/databases/tidb/.gitignore b/databases/tidb/.gitignore new file mode 100644 index 000000000000..1a82b71c124d --- /dev/null +++ b/databases/tidb/.gitignore @@ -0,0 +1,7 @@ +node_modules/ +tidb-data/ +dist/ +*.env* +!.env.example +data/ +logs/ diff --git a/databases/tidb/README.md b/databases/tidb/README.md new file mode 100644 index 000000000000..f456203c035f --- /dev/null +++ b/databases/tidb/README.md @@ -0,0 +1,143 @@ +# TiDB example + +This example shows how to: + +- Connect Prisma to a TiDB database +- Create the database schema with raw SQL +- Populate the Prisma schema using [`prisma db pull`](https://www.prisma.io/docs/reference/api-reference/command-reference#db-pull) +- Read and write data to the database using [Prisma Client](https://www.prisma.io/client) + +The example consists of two parts: + +- `tests/prisma.test.ts`: Jest test (in TypeScript) with a variety of Prisma Client queries and assertions to showcase access patterns +- `src/script.ts`: Node.js script with queries similar to the ones in the test. + +## Prerequisites + +- Node.js installed. +- [TiUP](https://docs.pingcap.com/tidb/dev/tiup-overview#install-tiup) installed. (Optional if you using TiDB Serverless) +- [Docker](https://www.docker.com/products/docker-desktop) installed. (Optional if you using TiDB Serverless) + +> **Note:** You can also connect to a [free TiDB Serverless Cluster](https://docs.pingcap.com/tidbcloud/dev-guide-build-cluster-in-cloud). + +## 1. Download this example & install dependencies + +Download this example: + +``` +curl https://codeload.github.com/prisma/prisma-examples/tar.gz/latest | tar -xz --strip=2 prisma-examples-latest/databases/tidb +``` + +Install npm dependencies: + +``` +cd tidb +npm install +``` + +
+**Alternative:** Clone this repository
+
+Clone this repository:
+
+```
+git clone git@github.com:prisma/prisma-examples.git --depth=1
+```
+
+Install npm dependencies:
+
+```
+cd prisma-examples/databases/tidb
+npm install
+```
+
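+Once the database is running and Prisma Client has been generated (steps 2 to 4 below), both `src/script.ts` and `tests/prisma.test.ts` follow the same basic pattern: instantiate `PrismaClient`, run queries, and disconnect. The following is a minimal, illustrative sketch of that pattern (it is not a file in this example):
+
+```ts
+import { PrismaClient } from '@prisma/client'
+
+// Prisma Client reads the TiDB connection string from DATABASE_URL in .env (configured in step 3).
+const prisma = new PrismaClient()
+
+async function main() {
+  // Any simple query works as a connectivity check against TiDB.
+  const users = await prisma.user.findMany({ take: 1 })
+  console.log(users)
+}
+
+main()
+  .catch((e) => {
+    console.error(e)
+    process.exit(1)
+  })
+  .finally(async () => {
+    await prisma.$disconnect()
+  })
+```
+
+Because TiDB speaks the MySQL wire protocol, the Prisma schema in this example uses the regular `mysql` datasource provider; no TiDB-specific driver is required.
+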
+## 2. Start a TiDB database server
+
+There are three approaches to setting up a TiDB database:
+
+1. Using a free hosted [TiDB Serverless](https://tidbcloud.com/free-trial) cluster.
+2. Using the TiUP CLI to start a local TiDB cluster.
+3. Locally with Docker, using the included [`docker-compose.yml`](./docker-compose.yml) file.
+
+### (Option 1) Using TiDB Serverless (Recommended)
+
+Follow the [guide](https://docs.pingcap.com/tidbcloud/dev-guide-build-cluster-in-cloud) to create a free TiDB Serverless cluster.
+
+### (Option 2) Using TiUP to start a local TiDB cluster
+
+Execute the following command to start a local TiDB cluster:
+
+```sh
+tiup playground --without-monitor --tiflash 0
+```
+
+### (Option 3) Start TiDB with Docker
+
+Run the following command from the `tidb` folder to start a TiDB Docker container:
+
+```sh
+docker compose up -d
+```
+
+## 3. Configure the database connection URL
+
+Prisma uses the `DATABASE_URL` environment variable in `.env` in the `tidb` folder to connect to the database.
+
+Create the file by copying the example file:
+
+```sh
+cp .env.example .env
+```
+
+Modify the `DATABASE_URL` in `.env` to point to your database server:
+
+- If you're using a local TiDB cluster deployed by TiUP or Docker Compose, you can use the following connection string by default:
+
+  ```sh
+  DATABASE_URL="mysql://root@localhost:4000/test"
+  ```
+
+- If you're using a TiDB Serverless cluster, you can find the database connection information in the console and fill in the following connection string:
+
+  **Note: You MUST add `?sslaccept=strict` to the end of the connection string to connect to TiDB Serverless.**
+
+  ```sh
+  DATABASE_URL="mysql://<USERNAME>:<PASSWORD>@<HOST>:4000/test?sslaccept=strict"
+  ```
+
+## 4. Create the database schema in TiDB with Prisma Migrate
+
+Now that you have defined the `DATABASE_URL` in `.env`, you will use Prisma Migrate to create a migration file with the SQL necessary to create the database schema.
+
+Run the following command from the `tidb` folder:
+
+```
+npx prisma migrate dev --name init
+```
+
+You should see the following output:
+
+```
+Your database is now in sync with your schema.
+```
+
+> **Note:** The `prisma migrate dev` command will automatically generate Prisma Client for use in `script.ts`.
+
+## 5. Run the tests and script
+
+To run the test in `tests/prisma.test.ts`, run the following command:
+
+```
+npm run test
+```
+
+To run the script `src/script.ts`, run the following command:
+
+```
+npm run start
+```
+
+As a next step, explore the `script.ts` file to see how to use Prisma Client to read and write data in the database.
diff --git a/databases/tidb/config/pd.toml b/databases/tidb/config/pd.toml
new file mode 100644
index 000000000000..b1562a569e30
--- /dev/null
+++ b/databases/tidb/config/pd.toml
@@ -0,0 +1,86 @@
+# PD Configuration.
+
+name = "pd"
+data-dir = "default.pd"
+
+client-urls = "http://127.0.0.1:2379"
+# if not set, use ${client-urls}
+advertise-client-urls = ""
+
+peer-urls = "http://127.0.0.1:2380"
+# if not set, use ${peer-urls}
+advertise-peer-urls = ""
+
+initial-cluster = "pd=http://127.0.0.1:2380"
+initial-cluster-state = "new"
+
+lease = 3
+tso-save-interval = "3s"
+
+[security]
+# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty
+cacert-path = ""
+# Path of file that contains X509 certificate in PEM format.
+cert-path = ""
+# Path of file that contains X509 key in PEM format.
+key-path = "" + +[log] +level = "error" + +# log format, one of json, text, console +#format = "text" + +# disable automatic timestamps in output +#disable-timestamp = false + +# file logging +[log.file] +#filename = "" +# max log file size in MB +#max-size = 300 +# max log file keep days +#max-days = 28 +# maximum number of old log files to retain +#max-backups = 7 +# rotate log by day +#log-rotate = true + +[metric] +# prometheus client push interval, set "0s" to disable prometheus. +interval = "15s" +# prometheus pushgateway address, leaves it empty will disable prometheus. +address = "pushgateway:9091" + +[schedule] +max-merge-region-size = 0 +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +merge-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +# customized schedulers, the format is as below +# if empty, it will use balance-leader, balance-region, hot-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +# The number of replicas for each region. +max-replicas = 3 +# The label keys specified the location of a store. +# The placement priorities is implied by the order of label keys. +# For example, ["zone", "rack"] means that we should place replicas to +# different zones first, then to different racks if we don't have enough zones. +location-labels = [] + +[label-property] +# Do not assign region leaders to stores that have these tags. +# [[label-property.reject-leader]] +# key = "zone" +# value = "cn1 diff --git a/databases/tidb/config/tidb.toml b/databases/tidb/config/tidb.toml new file mode 100644 index 000000000000..9b0ff24c4218 --- /dev/null +++ b/databases/tidb/config/tidb.toml @@ -0,0 +1,233 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# TiDB server port. +port = 4000 + +# Registered store name, [tikv, mocktikv] +store = "tikv" + +# TiDB storage path. +path = "/tmp/tidb" + +# The socket file to use for connection. +socket = "" + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "0" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. +split-table = true + +# The limit of concurrent executed sessions. +token-limit = 1000 + +# Only print a log when out of memory quota. +# Valid options: ["log", "cancel"] +oom-action = "log" + +# Set the memory quota for a query in bytes. Default: 32GB +mem-quota-query = 34359738368 + +# Enable coprocessor streaming. +enable-streaming = false + +# Set system variable 'lower_case_table_names' +lower-case-table-names = 2 + +[log] +# Log level: debug, info, warn, error, fatal. +level = "error" + +# Log format, one of json, text, console. +format = "text" + +# Disable automatic timestamp in output +disable-timestamp = false + +# Stores slow query log into separated files. +slow-query-file = "" + +# Queries with internal result greater than this value will be logged. +expensive-threshold = 10000 + +# Maximum query length recorded in log. +query-log-max-len = 2048 + +# File logging. +[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +# Max log file keep days. No clean up by default. +max-days = 0 + +# Maximum number of old log files to retain. No clean up by default. 
+max-backups = 0 + +# Rotate log by day +log-rotate = true + +[security] +# Path of file that contains list of trusted SSL CAs for connection with mysql client. +ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with mysql client. +ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with mysql client. +ssl-key = "" + +# Path of file that contains list of trusted SSL CAs for connection with cluster components. +cluster-ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +cluster-ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with cluster components. +cluster-ssl-key = "" + +[status] +# If enable status report HTTP service. +report-status = true + +# TiDB status port. +status-port = 10080 + +# Prometheus pushgateway address, leaves it empty will disable prometheus push. +metrics-addr = "pushgateway:9091" + +# Prometheus client push interval in second, set \"0\" to disable prometheus push. +metrics-interval = 15 + +[performance] +# Max CPUs to use, 0 use number of CPUs in the machine. +max-procs = 0 +# StmtCountLimit limits the max count of statement inside a transaction. +stmt-count-limit = 5000 + +# Set keep alive option for tcp connection. +tcp-keep-alive = true + +# The maximum number of retries when commit a transaction. +retry-limit = 10 + +# Whether support cartesian product. +cross-join = true + +# Stats lease duration, which influences the time of analyze and stats load. +stats-lease = "3s" + +# Run auto analyze worker on this tidb-server. +run-auto-analyze = true + +# Probability to use the query feedback to update stats, 0 or 1 for always false/true. +feedback-probability = 0.0 + +# The max number of query feedback that cache in memory. +query-feedback-limit = 1024 + +# Pseudo stats will be used if the ratio between the modify count and +# row count in statistics of a table is greater than it. +pseudo-estimate-ratio = 0.7 + +[proxy-protocol] +# PROXY protocol acceptable client networks. +# Empty string means disable PROXY protocol, * means all networks. +networks = "" + +# PROXY protocol header read timeout, unit is second +header-timeout = 5 + +[plan-cache] +enabled = false +capacity = 2560 +shards = 256 + +[prepared-plan-cache] +enabled = false +capacity = 100 + +[opentracing] +# Enable opentracing. +enable = false + +# Whether to enable the rpc metrics. +rpc-metrics = false + +[opentracing.sampler] +# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote +type = "const" + +# Param is a value passed to the sampler. +# Valid values for Param field are: +# - for "const" sampler, 0 or 1 for always false/true respectively +# - for "probabilistic" sampler, a probability between 0 and 1 +# - for "rateLimiting" sampler, the number of spans per second +# - for "remote" sampler, param is the same as for "probabilistic" +# and indicates the initial sampling rate before the actual one +# is received from the mothership +param = 1.0 + +# SamplingServerURL is the address of jaeger-agent's HTTP sampling server +sampling-server-url = "" + +# MaxOperations is the maximum number of operations that the sampler +# will keep track of. If an operation is not tracked, a default probabilistic +# sampler will be used rather than the per operation specific sampler. 
+max-operations = 0 + +# SamplingRefreshInterval controls how often the remotely controlled sampler will poll +# jaeger-agent for the appropriate sampling strategy. +sampling-refresh-interval = 0 + +[opentracing.reporter] +# QueueSize controls how many spans the reporter can keep in memory before it starts dropping +# new spans. The queue is continuously drained by a background go-routine, as fast as spans +# can be sent out of process. +queue-size = 0 + +# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. +# It is generally not useful, as it only matters for very low traffic services. +buffer-flush-interval = 0 + +# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter +# and logs all submitted spans. Main Configuration.Logger must be initialized in the code +# for this option to have any effect. +log-spans = false + +# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address +local-agent-host-port = "" + +[tikv-client] +# Max gRPC connections that will be established with each tikv-server. +grpc-connection-count = 16 + +# After a duration of this time in seconds if the client doesn't see any activity it pings +# the server to see if the transport is still alive. +grpc-keepalive-time = 10 + +# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds +# and if no activity is seen even after that the connection is closed. +grpc-keepalive-timeout = 3 + +# max time for commit command, must be twice bigger than raft election timeout. +commit-timeout = "41s" + +[binlog] + +# Socket file to write binlog. +binlog-socket = "" + +# WriteTimeout specifies how long it will wait for writing binlog to pump. +write-timeout = "15s" + +# If IgnoreError is true, when writting binlog meets error, TiDB would stop writting binlog, +# but still provide service. +ignore-error = false diff --git a/databases/tidb/config/tikv.toml b/databases/tidb/config/tikv.toml new file mode 100644 index 000000000000..b2e59c398ef8 --- /dev/null +++ b/databases/tidb/config/tikv.toml @@ -0,0 +1,499 @@ +# TiKV config template +# Human-readable big numbers: +# File size(based on byte): KB, MB, GB, TB, PB +# e.g.: 1_048_576 = "1MB" +# Time(based on ms): ms, s, m, h +# e.g.: 78_000 = "1.3m" + +# log level: trace, debug, info, warn, error, off. +[log] +level = "error" +# file to store log, write to stderr if it's empty. +[log.file] +# filename = "" + +[readpool.storage] +# size of thread pool for high-priority operations +# high-concurrency = 4 +# size of thread pool for normal-priority operations +# normal-concurrency = 4 +# size of thread pool for low-priority operations +# low-concurrency = 4 +# max running high-priority operations, reject if exceed +# max-tasks-high = 8000 +# max running normal-priority operations, reject if exceed +# max-tasks-normal = 8000 +# max running low-priority operations, reject if exceed +# max-tasks-low = 8000 +# size of stack size for each thread pool +# stack-size = "10MB" + +[readpool.coprocessor] +# Notice: if CPU_NUM > 8, default thread pool size for coprocessors +# will be set to CPU_NUM * 0.8. + +# high-concurrency = 8 +# normal-concurrency = 8 +# low-concurrency = 8 +# max-tasks-high = 16000 +# max-tasks-normal = 16000 +# max-tasks-low = 16000 +# stack-size = "10MB" + +[server] +# set listening address. +# addr = "127.0.0.1:20160" +# set advertise listening address for client communication, if not set, use addr instead. 
+# advertise-addr = "" +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# compression type for grpc channel, available values are no, deflate and gzip. +# grpc-compression-type = "no" +# size of thread pool for grpc server. +# grpc-concurrency = 4 +# The number of max concurrent streams/requests on a client connection. +# grpc-concurrent-stream = 1024 +# The number of connections with each tikv server to send raft messages. +# grpc-raft-conn-num = 10 +# Amount to read ahead on individual grpc streams. +# grpc-stream-initial-window-size = "2MB" + +# How many snapshots can be sent concurrently. +# concurrent-send-snap-limit = 32 +# How many snapshots can be recv concurrently. +# concurrent-recv-snap-limit = 32 + +# max count of tasks being handled, new tasks will be rejected. +# end-point-max-tasks = 2000 + +# max recursion level allowed when decoding dag expression +# end-point-recursion-limit = 1000 + +# max time to handle coprocessor request before timeout +# end-point-request-max-handle-duration = "60s" + +# the max bytes that snapshot can be written to disk in one second, +# should be set based on your disk performance +# snap-max-write-bytes-per-sec = "100MB" + +# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }. +# labels = {} + +[storage] +# set the path to rocksdb directory. +# data-dir = "/tmp/tikv/store" + +# notify capacity of scheduler's channel +# scheduler-notify-capacity = 10240 + +# maximum number of messages can be processed in one tick +# scheduler-messages-per-tick = 1024 + +# the number of slots in scheduler latches, concurrency control for write. +# scheduler-concurrency = 2048000 + +# scheduler's worker pool size, should increase it in heavy write cases, +# also should less than total cpu cores. +# scheduler-worker-pool-size = 4 + +# When the pending write bytes exceeds this threshold, +# the "scheduler too busy" error is displayed. +# scheduler-pending-write-threshold = "100MB" + +[pd] +# pd endpoints +# endpoints = [] + +[metric] +# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing. +# interval = "15s" +# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing. +address = "pushgateway:9091" +# the Prometheus client push job name. Note: A node id will automatically append, e.g., "tikv_1". +# job = "tikv" + +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +# sync-log = true + +# set the path to raftdb directory, default value is data-dir/raft +# raftdb-path = "" + +# set store capacity, if no set, use disk capacity. +# capacity = 0 + +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 + +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# Region heartbeat tick interval for reporting to pd. +# pd-heartbeat-tick-interval = "60s" +# Store heartbeat tick interval for reporting to pd. +# pd-store-heartbeat-tick-interval = "10s" + +# When region size changes exceeds region-split-check-diff, we should check +# whether the region should be split or not. +# region-split-check-diff = "6MB" + +# Interval to check region whether need to be split or not. +# split-region-check-tick-interval = "10s" + +# When raft entry exceed the max size, reject to propose the entry. 
+# raft-entry-max-size = "8MB" + +# Interval to gc unnecessary raft log. +# raft-log-gc-tick-interval = "10s" +# A threshold to gc stale raft log, must >= 1. +# raft-log-gc-threshold = 50 +# When entry count exceed this value, gc will be forced trigger. +# raft-log-gc-count-limit = 72000 +# When the approximate size of raft log entries exceed this value, gc will be forced trigger. +# It's recommanded to set it to 3/4 of region-split-size. +# raft-log-gc-size-limit = "72MB" + +# When a peer hasn't been active for max-peer-down-duration, +# we will consider this peer to be down and report it to pd. +# max-peer-down-duration = "5m" + +# Interval to check whether start manual compaction for a region, +# region-compact-check-interval = "5m" +# Number of regions for each time to check. +# region-compact-check-step = 100 +# The minimum number of delete tombstones to trigger manual compaction. +# region-compact-min-tombstones = 10000 +# Interval to check whether should start a manual compaction for lock column family, +# if written bytes reach lock-cf-compact-threshold for lock column family, will fire +# a manual compaction for lock column family. +# lock-cf-compact-interval = "10m" +# lock-cf-compact-bytes-threshold = "256MB" + +# Interval (s) to check region whether the data are consistent. +# consistency-check-interval = 0 + +# Use delete range to drop a large number of continuous keys. +# use-delete-range = false + +# delay time before deleting a stale peer +# clean-stale-peer-delay = "10m" + +# Interval to cleanup import sst files. +# cleanup-import-sst-interval = "10m" + +[coprocessor] +# When it is true, it will try to split a region with table prefix if +# that region crosses tables. It is recommended to turn off this option +# if there will be a large number of tables created. +# split-region-on-table = true +# When the region's size exceeds region-max-size, we will split the region +# into two which the left region's size will be region-split-size or a little +# bit smaller. +# region-max-size = "144MB" +# region-split-size = "96MB" + +[rocksdb] +# Maximum number of concurrent background jobs (compactions and flushes) +# max-background-jobs = 8 + +# This value represents the maximum number of threads that will concurrently perform a +# compaction job by breaking it into multiple, smaller ones that are run simultaneously. +# Default: 1 (i.e. no subcompactions) +# max-sub-compactions = 1 + +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db. +max-open-files = 1024 + +# Max size of rocksdb's MANIFEST file. +# For detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST +# max-manifest-file-size = "20MB" + +# If true, the database will be created if it is missing. 
+# create-if-missing = true + +# rocksdb wal recovery mode +# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; +# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; +# 2 : PointInTimeRecovery, Recover to point-in-time consistency; +# 3 : SkipAnyCorruptedRecords, Recovery after a disaster; +# wal-recovery-mode = 2 + +# rocksdb write-ahead logs dir path +# This specifies the absolute dir path for write-ahead logs (WAL). +# If it is empty, the log files will be in the same dir as data. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-dir to a directory on a persistent storage. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-dir = "/tmp/tikv/store" + +# The following two fields affect how archived write-ahead logs will be deleted. +# 1. If both set to 0, logs will be deleted asap and will not get into the archive. +# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0, +# WAL files will be checked every 10 min and if total size is greater +# then wal-size-limit, they will be deleted starting with the +# earliest until size_limit is met. All empty files will be deleted. +# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then +# WAL files will be checked every wal-ttl-seconds / 2 and those that +# are older than wal-ttl-seconds will be deleted. +# 4. If both are not 0, WAL files will be checked every 10 min and both +# checks will be performed with ttl being first. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-ttl-seconds to a value greater than 0 (like 86400) and backup your db on a regular basis. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-ttl-seconds = 0 +# wal-size-limit = 0 + +# rocksdb max total wal size +# max-total-wal-size = "4GB" + +# Rocksdb Statistics provides cumulative stats over time. +# Turn statistics on will introduce about 5%-10% overhead for RocksDB, +# but it is worthy to know the internal status of RocksDB. +# enable-statistics = true + +# Dump statistics periodically in information logs. +# Same as rocksdb's default value (10 min). +# stats-dump-period = "10m" + +# Due to Rocksdb FAQ: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ, +# If you want to use rocksdb on multi disks or spinning disks, you should set value at +# least 2MB; +# compaction-readahead-size = 0 + +# This is the maximum buffer size that is used by WritableFileWrite +# writable-file-max-buffer-size = "1MB" + +# Use O_DIRECT for both reads and writes in background flush and compactions +# use-direct-io-for-flush-and-compaction = false + +# Limit the disk IO of compaction and flush. Compaction and flush can cause +# terrible spikes if they exceed a certain threshold. Consider setting this to +# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy +# write workload, limiting compaction and flush speed can cause write stalls too. +# rate-bytes-per-sec = 0 + +# Enable or disable the pipelined write +# enable-pipelined-write = true + +# Allows OS to incrementally sync files to disk while they are being +# written, asynchronously, in the background. +# bytes-per-sync = "0MB" + +# Allows OS to incrementally sync WAL to disk while it is being written. +# wal-bytes-per-sync = "0KB" + +# Specify the maximal size of the Rocksdb info log file. 
If the log file +# is larger than `max_log_file_size`, a new info log file will be created. +# If max_log_file_size == 0, all logs will be written to one log file. +# Default: 1GB +# info-log-max-size = "1GB" + +# Time for the Rocksdb info log file to roll (in seconds). +# If specified with non-zero value, log file will be rolled +# if it has been active longer than `log_file_time_to_roll`. +# Default: 0 (disabled) +# info-log-roll-time = "0" + +# Maximal Rocksdb info log files to be kept. +# Default: 10 +# info-log-keep-log-file-num = 10 + +# This specifies the Rocksdb info LOG dir. +# If it is empty, the log files will be in the same dir as data. +# If it is non empty, the log files will be in the specified dir, +# and the db data dir's absolute path will be used as the log file +# name's prefix. +# Default: empty +# info-log-dir = "" + +# Column Family default used to store actual data of the database. +[rocksdb.defaultcf] +# compression method (if any) is used to compress a block. +# no: kNoCompression +# snappy: kSnappyCompression +# zlib: kZlibCompression +# bzip2: kBZip2Compression +# lz4: kLZ4Compression +# lz4hc: kLZ4HCCompression +# zstd: kZSTD + +# per level compression +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] + +# Approximate size of user data packed per block. Note that the +# block size specified here corresponds to uncompressed data. +# block-size = "64KB" + +# If you're doing point lookups you definitely want to turn bloom filters on, We use +# bloom filters to avoid unnecessary disk reads. Default bits_per_key is 10, which +# yields ~1% false positive rate. Larger bits_per_key values will reduce false positive +# rate, but increase memory usage and space amplification. +# bloom-filter-bits-per-key = 10 + +# false means one sst file one bloom filter, true means evry block has a corresponding bloom filter +# block-based-bloom-filter = false + +# level0-file-num-compaction-trigger = 4 + +# Soft limit on number of level-0 files. We start slowing down writes at this point. +# level0-slowdown-writes-trigger = 20 + +# Maximum number of level-0 files. We stop writes at this point. +# level0-stop-writes-trigger = 36 + +# Amount of data to build up in memory (backed by an unsorted log +# on disk) before converting to a sorted on-disk file. +# write-buffer-size = "128MB" + +# The maximum number of write buffers that are built up in memory. +# max-write-buffer-number = 5 + +# The minimum number of write buffers that will be merged together +# before writing to storage. +# min-write-buffer-number-to-merge = 1 + +# Control maximum total data size for base level (level 1). +# max-bytes-for-level-base = "512MB" + +# Target file size for compaction. +# target-file-size-base = "8MB" + +# Max bytes for compaction.max_compaction_bytes +# max-compaction-bytes = "2GB" + +# There are four different algorithms to pick files to compact. +# 0 : ByCompensatedSize +# 1 : OldestLargestSeqFirst +# 2 : OldestSmallestSeqFirst +# 3 : MinOverlappingRatio +# compaction-pri = 3 + +# block-cache used to cache uncompressed blocks, big block-cache can speed up read. +# in normal cases should tune to 30%-50% system's total memory. +# block-cache-size = "1GB" + +# Indicating if we'd put index/filter blocks to the block cache. +# If not specified, each "table reader" object will pre-load index/filter block +# during table initialization. +# cache-index-and-filter-blocks = true + +# Pin level0 filter and index blocks in cache. 
+# pin-l0-filter-and-index-blocks = true + +# Enable read amplication statistics. +# value => memory usage (percentage of loaded blocks memory) +# 1 => 12.50 % +# 2 => 06.25 % +# 4 => 03.12 % +# 8 => 01.56 % +# 16 => 00.78 % +# read-amp-bytes-per-bit = 0 + +# Pick target size of each level dynamically. +# dynamic-level-bytes = true + +# Options for Column Family write +# Column Family write used to store commit informations in MVCC model +[rocksdb.writecf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# in normal cases should tune to 10%-30% system's total memory. +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 3 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[rocksdb.lockcf] +# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] +# block-size = "16KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "128MB" +# target-file-size-base = "8MB" +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 1 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[raftdb] +# max-sub-compactions = 1 +max-open-files = 1024 +# max-manifest-file-size = "20MB" +# create-if-missing = true + +# enable-statistics = true +# stats-dump-period = "10m" + +# compaction-readahead-size = 0 +# writable-file-max-buffer-size = "1MB" +# use-direct-io-for-flush-and-compaction = false +# enable-pipelined-write = true +# allow-concurrent-memtable-write = false +# bytes-per-sync = "0MB" +# wal-bytes-per-sync = "0KB" + +# info-log-max-size = "1GB" +# info-log-roll-time = "0" +# info-log-keep-log-file-num = 10 +# info-log-dir = "" + +[raftdb.defaultcf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# should tune to 256MB~2GB. +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[security] +# set the path for certificates. Empty string means disabling secure connectoins. +# ca-path = "" +# cert-path = "" +# key-path = "" + +[import] +# the directory to store importing kv data. +# import-dir = "/tmp/tikv/import" +# number of threads to handle RPC requests. +# num-threads = 8 +# stream channel window size, stream will be blocked on channel full. 
+# stream-channel-window = 128 diff --git a/databases/tidb/docker-compose.yml b/databases/tidb/docker-compose.yml new file mode 100644 index 000000000000..361f8e91b3b6 --- /dev/null +++ b/databases/tidb/docker-compose.yml @@ -0,0 +1,58 @@ +# https://hub.docker.com/r/pingcap/tidb + +version: '3' +services: + tidb: + image: pingcap/tidb:v7.1.0 + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./config/tidb.toml:/tidb.toml:ro + - ./logs:/logs + command: + - --store=tikv + - --path=pd:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + - --advertise-address=tidb + depends_on: + - "tikv" + restart: on-failure + + pd: + image: pingcap/pd:v7.1.0 + ports: + - "2379" + volumes: + - ./config/pd.toml:/pd.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --name=pd + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd:2379 + - --advertise-peer-urls=http://pd:2380 + - --initial-cluster=pd=http://pd:2380 + - --data-dir=/data/pd + - --config=/pd.toml + - --log-file=/logs/pd.log + restart: on-failure + + tikv: + image: pingcap/tikv:v7.1.0 + volumes: + - ./config/tikv.toml:/tikv.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv:20160 + - --data-dir=/data/tikv + - --pd=pd:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv.log + depends_on: + - "pd" + restart: on-failure diff --git a/databases/tidb/jest.config.js b/databases/tidb/jest.config.js new file mode 100644 index 000000000000..5378dbdf8424 --- /dev/null +++ b/databases/tidb/jest.config.js @@ -0,0 +1,5 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + testTimeout: 20000, +} diff --git a/databases/tidb/package.json b/databases/tidb/package.json new file mode 100644 index 000000000000..106d8fde227b --- /dev/null +++ b/databases/tidb/package.json @@ -0,0 +1,28 @@ +{ + "name": "prisma-tidb-example", + "version": "1.0.0", + "keywords": [], + "license": "ISC", + "author": "Mini256", + "main": "index.js", + "scripts": { + "start": "ts-node ./src/script.ts", + "test": "jest", + "test:watch": "jest --watch" + }, + "dependencies": { + "@prisma/client": "4.15.0", + "jest": "29.5.0" + }, + "devDependencies": { + "@types/jest": "29.5.2", + "@types/node": "18.16.18", + "@types/prettyjson": "0.0.30", + "prettyjson": "1.2.5", + "prisma": "4.15.0", + "ts-jest": "29.1.0", + "ts-node": "10.9.1", + "ts-node-dev": "2.0.0", + "typescript": "4.9.5" + } +} diff --git a/databases/tidb/prisma/schema.prisma b/databases/tidb/prisma/schema.prisma new file mode 100644 index 000000000000..dee771694d48 --- /dev/null +++ b/databases/tidb/prisma/schema.prisma @@ -0,0 +1,45 @@ +generator client { + provider = "prisma-client-js" +} + +datasource db { + provider = "mysql" + url = env("DATABASE_URL") +} + +model User { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + email String @unique + name String? + comments Comment[] + posts Post[] +} + +model Post { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + title String + content String? 
+ published Boolean @default(false) + authorId String + author User @relation(fields: [authorId], references: [id]) + comments Comment[] + tags Tag[] +} + +model Comment { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + comment String + writtenById String + postId String + writtenBy User @relation(fields: [writtenById], references: [id]) + post Post @relation(fields: [postId], references: [id], onUpdate: NoAction) +} + +model Tag { + id String @id @default(uuid()) + tag String @unique + posts Post[] +} diff --git a/databases/tidb/src/script.ts b/databases/tidb/src/script.ts new file mode 100644 index 000000000000..7abdc2114b1f --- /dev/null +++ b/databases/tidb/src/script.ts @@ -0,0 +1,262 @@ +import { PrismaClient } from '@prisma/client' +import prettyjson from 'prettyjson' + +const prisma = new PrismaClient() + +// A `main` function so that we can use async/await +async function main() { + const tags = await Promise.all([ + prisma.tag.create({ + data: { + tag: 'Node.js', + }, + }), + prisma.tag.create({ + data: { + tag: 'TypeScript', + }, + }), + prisma.tag.create({ + data: { + tag: 'Prisma', + }, + }), + prisma.tag.create({ + data: { + tag: 'Databases', + }, + }), + prisma.tag.create({ + data: { + tag: 'TiDB', + }, + }), + prisma.tag.create({ + data: { + tag: 'Serverless', + }, + }), + ]) + + console.log('Created tags: \n', prettyjson.render(tags)) + + // Seed the database with users and posts + const user1 = await prisma.user.create({ + data: { + email: 'alice@prisma.io', + name: 'Alice', + posts: { + create: { + title: 'Productive development with Prisma and TiDB', + published: true, + tags: { + connect: [ + { + tag: 'Prisma', + }, + { + tag: 'TiDB', + }, + ], + }, + }, + }, + }, + include: { + posts: true, + }, + }) + + console.log('Created first user: \n', prettyjson.render(user1)) + + const user2 = await prisma.user.create({ + data: { + email: 'shakuntala@prisma.io', + name: 'Shakuntala', + comments: { + create: { + comment: + 'Thanks for sharing. 
TiDB has helped us rapidly scale our architecture with very little effort.', + post: { + connect: { + id: user1.posts[0].id, + }, + }, + }, + }, + posts: { + create: [ + { + title: 'Introducing to Prisma with TiDB', + published: true, + content: + 'Check out the Prisma blog at https://www.prisma.io/blog for more information', + tags: { + connect: [ + { + tag: 'Node.js', + }, + { + tag: 'TiDB', + }, + { + tag: 'Databases', + }, + ], + }, + }, + { + title: 'Zero cost type safety with Prisma', + published: true, + tags: { + connect: [ + { + tag: 'Node.js', + }, + { + tag: 'Databases', + }, + ], + }, + }, + { + title: 'Horizontal scaling made easy', + published: false, + }, + ], + }, + }, + include: { + posts: { + include: { + tags: true, + }, + }, + comments: { + include: { + post: true, + }, + }, + }, + }) + + console.log('Created second user: \n', prettyjson.render(user2)) + + const taggedPosts = await prisma.tag.findUnique({ + where: { + tag: 'Node.js', + }, + include: { + posts: true, + }, + }) + + console.log( + 'Retrieved all posts with the Node.js tag: ', + prettyjson.render(taggedPosts), + ) + + // Retrieve all published posts with a tag + const allPosts = await prisma.post.findMany({ + where: { + AND: [ + { published: true }, + { + tags: { + some: { + tag: 'TiDB', + }, + }, + }, + ], + }, + }) + console.log( + `Retrieved all published posts with the TiDB tag: `, + prettyjson.render(allPosts), + ) + + // Create a new post (written by an already existing user with email alice@prisma.io) + const newPost = await prisma.post.create({ + data: { + title: 'Join the Prisma Slack community', + content: 'http://slack.prisma.io', + published: false, + author: { + connect: { + email: 'alice@prisma.io', // Should have been created during initial seeding + }, + }, + tags: { + connectOrCreate: { + create: { + tag: 'Community', + }, + where: { + tag: 'Community', + }, + }, + connect: { + tag: 'Prisma', + }, + }, + comments: { + create: { + comment: 'Looking forward to joining to Prisma community.', + writtenBy: { + connect: { + email: 'shakuntala@prisma.io', + }, + }, + }, + }, + }, + include: { + comments: { + include: { + writtenBy: true, + }, + }, + tags: true, + }, + }) + console.log(`Created a new post: \n`, prettyjson.render(newPost)) + + // Publish the new post + const updatedPost = await prisma.post.update({ + where: { + id: newPost.id, + }, + data: { + published: true, + }, + }) + console.log(`Published the newly created post: `, updatedPost) + + // Retrieve all posts by user with email alice@prisma.io + const postsByUser = await prisma.user + .findUnique({ + where: { + email: 'alice@prisma.io', + }, + }) + .posts() + console.log(`Retrieved all posts from a specific user: `, postsByUser) +} + +async function clearDB() { + await prisma.tag.deleteMany({}) + await prisma.comment.deleteMany({}) + await prisma.post.deleteMany({}) + await prisma.user.deleteMany({}) +} + +clearDB() + .then(main) + .catch((e) => { + console.error(e) + process.exit(1) + }) + .finally(async () => { + await prisma.$disconnect() + }) diff --git a/databases/tidb/tests/prisma.test.ts b/databases/tidb/tests/prisma.test.ts new file mode 100644 index 000000000000..fe93819ad03f --- /dev/null +++ b/databases/tidb/tests/prisma.test.ts @@ -0,0 +1,295 @@ +import { PrismaClient } from '@prisma/client' + +export const prisma = new PrismaClient() + +describe('example test with Prisma Client', () => { + beforeAll(async () => { + await prisma.tag.deleteMany({}) + await prisma.comment.deleteMany({}) + await prisma.post.deleteMany({}) + 
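+    // Users are deleted last: the comments and posts that reference them were removed above,
+    // so no foreign-key constraints are violated.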
await prisma.user.deleteMany({}) + }) + afterAll(async () => { + await prisma.$disconnect() + }) + test('test query', async () => { + const data = await prisma.user.findMany({ take: 1, select: { id: true } }) + expect(data).toBeTruthy() + }) + + test('create tags', async () => { + const tags = await Promise.all([ + prisma.tag.create({ + data: { + tag: 'Node.js', + }, + }), + prisma.tag.create({ + data: { + tag: 'TypeScript', + }, + }), + prisma.tag.create({ + data: { + tag: 'Prisma', + }, + }), + prisma.tag.create({ + data: { + tag: 'Databases', + }, + }), + prisma.tag.create({ + data: { + tag: 'TiDB', + }, + }), + prisma.tag.create({ + data: { + tag: 'Serverless', + }, + }), + ]) + + expect(tags.length).toEqual(6) + expect(typeof tags[0].id).toEqual('string') + expect(tags[0].tag).toBeTruthy() + }) + + test('create two user with posts comments and tags', async () => { + let email = 'alice@prisma.io' + let name = 'Alice' + + const user1 = await prisma.user.create({ + data: { + email, + name, + posts: { + create: { + title: 'Bringing value to users with rapid deployment', + published: true, + tags: { + connect: { + tag: 'Prisma', + }, + }, + }, + }, + }, + include: { + posts: true, + }, + }) + + expect(user1.id).toBeTruthy() + expect(user1.name).toEqual(name) + expect(user1.email).toEqual(email) + expect( + user1.createdAt.getMonth() === new Date().getMonth() && + user1.createdAt.getFullYear() === new Date().getFullYear(), + ).toBeTruthy() + expect(user1.posts.length).toEqual(1) + expect(user1.posts[0].published).toEqual(true) + + email = 'shakuntala@prisma.io' + name = 'Shakuntala' + + const user2 = await prisma.user.create({ + data: { + name, + email, + comments: { + create: { + comment: + 'Thanks for sharing. Reducing the size of our releases and deployment more frequently has allowed us to bring more value to our customers.', + post: { + connect: { + id: user1.posts[0].id, + }, + }, + }, + }, + posts: { + create: [ + { + title: 'GraphQL Authentication simplified', + published: false, + }, + { + title: 'Introducing to Prisma with TiDB', + published: true, + content: + 'Check out the Prisma blog at https://www.prisma.io/blog for more information', + tags: { + // Creates the rows in the m-n relation table + connect: [ + { + tag: 'Node.js', + }, + { + tag: 'TiDB', + }, + { + tag: 'Databases', + }, + ], + }, + }, + { + title: 'Zero cost type safety with Prisma', + published: true, + tags: { + // Creates the rows in the m-n relation table + connect: [ + { + tag: 'Node.js', + }, + { + tag: 'Databases', + }, + ], + }, + }, + ], + }, + }, + include: { + posts: { + include: { + tags: true, + }, + orderBy: { title: 'asc' }, + }, + comments: { + include: { + post: true, + }, + }, + }, + }) + + expect(user2.id).toBeTruthy() + expect(user2.name).toEqual(name) + expect(user2.email).toEqual(email) + expect(user2.posts.length).toEqual(3) + expect(user2.posts[1].authorId).toEqual(user2.id) + expect(user2.posts[1].tags[1].id).toBeTruthy() + expect( + user2.comments[0].comment.toLowerCase().includes('thanks'), + ).toBeTruthy() + expect(user2.comments[0].postId).toEqual(user1.posts[0].id) + + expect( + user2.createdAt.getMonth() === new Date().getMonth() && + user2.createdAt.getFullYear() === new Date().getFullYear(), + ).toBeTruthy() + + const updatedName = 'Shakuntala Devi' + const updatedUser2 = await prisma.user.update({ + data: { + name: updatedName, + }, + where: { + id: user2.id, + }, + }) + + expect(updatedUser2.name).toEqual(updatedName) + }) + + test('Get all published posts with a given tag', 
async () => { + // Retrieve all published posts + const taggedPosts = await prisma.tag.findUnique({ + where: { + tag: 'Node.js', + }, + include: { + posts: true, + }, + }) + expect(taggedPosts).toBeTruthy() + expect(taggedPosts?.tag).toEqual('Node.js') + expect(taggedPosts?.posts.length).toEqual(2) + }) + + test('Create unpublished post with tags and comments for an existing user and then publish', async () => { + const newPost = await prisma.post.create({ + data: { + title: 'Join the Prisma Slack community', + content: 'http://slack.prisma.io', + published: false, + author: { + connect: { + email: 'alice@prisma.io', // Should have been created during initial seeding + }, + }, + tags: { + connectOrCreate: { + create: { + tag: 'Community', + }, + where: { + tag: 'Community', + }, + }, + connect: { + tag: 'Prisma', + }, + }, + comments: { + create: { + comment: 'Looking forward to joining to Prisma community.', + writtenBy: { + connect: { + email: 'shakuntala@prisma.io', + }, + }, + }, + }, + }, + include: { + comments: true, + tags: true, + }, + }) + expect(newPost).toBeTruthy() + expect(newPost.tags.length).toEqual(2) + expect(newPost.tags[0].tag).toBeTruthy() + expect(newPost.comments.length).toEqual(1) + + await prisma.post.update({ + where: { + id: newPost.id, + }, + data: { + published: true, + }, + }) + }) + + test('Find comments by a user', async () => { + // Retrieve all published posts + const userComments = await prisma.user + .findUnique({ + where: { + email: 'shakuntala@prisma.io', + }, + }) + .comments() + expect(userComments).toBeTruthy() + expect(userComments?.length).toEqual(2) + }) + + test('Fetch posts by a user', async () => { + // Retrieve all published posts + const userPosts = await prisma.user + .findUnique({ + where: { + email: 'shakuntala@prisma.io', + }, + }) + .posts() + expect(userPosts).toBeTruthy() + expect(userPosts?.length).toEqual(3) + }) +}) diff --git a/databases/tidb/tsconfig.json b/databases/tidb/tsconfig.json new file mode 100644 index 000000000000..a8744c799f6d --- /dev/null +++ b/databases/tidb/tsconfig.json @@ -0,0 +1,13 @@ +{ + "compilerOptions": { + "sourceMap": true, + "outDir": "dist", + "target": "ES2018", + "module": "commonjs", + "strict": true, + "lib": ["esnext"], + "esModuleInterop": true + }, + "exclude": ["dist"], + "include": ["tests"] +}