diff --git a/_includes/android/objects.md b/_includes/android/objects.md index f8f5005f3..b1c0071f5 100644 --- a/_includes/android/objects.md +++ b/_includes/android/objects.md @@ -30,7 +30,7 @@ After this code runs, you will probably be wondering if anything really happened ```javascript objectId: "xWMyZ4YEGZ", score: 1337, playerName: "Sean Plott", cheatMode: false, -createdAt:"2011-06-10T18:33:42Z", updatedAt:"2011-06-10T18:33:42Z" +createdAt:"2022-01-01T12:23:45.678Z", updatedAt:"2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `GameScore` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. diff --git a/_includes/arduino/objects.md b/_includes/arduino/objects.md index 77d16fe9d..32fda3ee7 100644 --- a/_includes/arduino/objects.md +++ b/_includes/arduino/objects.md @@ -36,7 +36,7 @@ response.close(); // Free the resource After this code runs, you will probably be wondering if anything really happened. To make sure the data was saved, you can look at the Data Browser in your app on Parse. You should see something like this: ```javascript - objectId: "xWMyZ4YEGZ", temperature: 175.0, leverDown: true, createdAt: "2011-06-10T18:33:42Z", updatedAt: "2011-06-10T18:33:42Z" + objectId: "xWMyZ4YEGZ", temperature: 175.0, leverDown: true, createdAt: "2022-01-01T12:23:45.678Z", updatedAt: "2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `Temperature` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. 
@@ -106,7 +106,7 @@ create.setClassName("TestObject"); create.add("number", 42.0); create.add("foo", "bar"); create.addGeoPoint("location", 40.0, -30.0); -create.addJSONValue("dateField", "{ \"__type\": \"Date\", \"iso\": \"2011-08-21T18:02:52.249Z\" }"); create.addJSONValue("arrayField", "[ 30, \"string\" ]"); +create.addJSONValue("dateField", "{ \"__type\": \"Date\", \"iso\": \"2022-01-01T12:23:45.678Z\" }"); create.addJSONValue("arrayField", "[ 30, \"string\" ]"); create.addJSONValue("objectField", "{ \"number\": 30, \"string\": \"baz\" }"); create.addJSONValue("emptyField", "null"); create.send(); diff --git a/_includes/common/data.md b/_includes/common/data.md index 5e3bc2059..413b28cdb 100644 --- a/_includes/common/data.md +++ b/_includes/common/data.md @@ -42,8 +42,8 @@ The JSON format is an array of objects in our REST format or a JSON object with "score": 1337, "playerName": "Sean Plott", "cheatMode": false, - "createdAt": "2012-07-11T20:56:12.347Z", - "updatedAt": "2012-07-11T20:56:12.347Z", + "createdAt": "2022-01-01T12:23:45.678Z", + "updatedAt": "2022-01-01T12:23:45.678Z", "objectId": "fchpZwSuGG" }] } diff --git a/_includes/dotnet/objects.md b/_includes/dotnet/objects.md index 331dee6c7..a99d3b7c5 100644 --- a/_includes/dotnet/objects.md +++ b/_includes/dotnet/objects.md @@ -30,7 +30,7 @@ After this code runs, you will probably be wondering if anything really happened ```javascript objectId: "xWMyZ4YEGZ", score: 1337, playerName: "Sean Plott", cheatMode: false, -createdAt:"2011-06-10T18:33:42Z", updatedAt:"2011-06-10T18:33:42Z" +createdAt:"2022-01-01T12:23:45.678Z", updatedAt:"2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `GameScore` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. 
diff --git a/_includes/graphql/objects.md b/_includes/graphql/objects.md index 9f5df0daa..8881ad98a 100644 --- a/_includes/graphql/objects.md +++ b/_includes/graphql/objects.md @@ -53,8 +53,8 @@ mutation createAGameScore { "clientMutationId": "anUniqueId", "gameScore": { "id": "R2FtZVNjb3JlOjZtdGlNcmtXNnY=", - "updatedAt": "2019-12-02T10:14:28.786Z", - "createdAt": "2019-12-02T10:14:28.786Z", + "updatedAt": "2022-01-01T12:23:45.678Z", + "createdAt": "2022-01-01T12:23:45.678Z", "playerName": "Sean Plott", "score": 1337, "cheatMode": false, diff --git a/_includes/ios/objects.md b/_includes/ios/objects.md index 0b9278571..d0e0a33ff 100644 --- a/_includes/ios/objects.md +++ b/_includes/ios/objects.md @@ -51,7 +51,7 @@ After this code runs, you will probably be wondering if anything really happened ```js objectId: "xWMyZ4YEGZ", score: 1337, playerName: "Sean Plott", cheatMode: false, -createdAt:"2011-06-10T18:33:42Z", updatedAt:"2011-06-10T18:33:42Z" +createdAt:"2022-01-01T12:23:45.678Z", updatedAt:"2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `GameScore` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. diff --git a/_includes/js/objects.md b/_includes/js/objects.md index c3fce7580..39197859f 100644 --- a/_includes/js/objects.md +++ b/_includes/js/objects.md @@ -146,7 +146,7 @@ After this code runs, you will probably be wondering if anything really happened ```json objectId: "xWMyZ4YEGZ", score: 1337, playerName: "Sean Plott", cheatMode: false, -createdAt:"2011-06-10T18:33:42Z", updatedAt:"2011-06-10T18:33:42Z" +createdAt:"2022-01-01T12:23:45.678Z", updatedAt:"2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `GameScore` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. 
diff --git a/_includes/js/users.md b/_includes/js/users.md index b2ee7efe9..514c19dec 100644 --- a/_includes/js/users.md +++ b/_includes/js/users.md @@ -417,8 +417,8 @@ With a response body like: ```json { "username": "Parse", - "createdAt": "2012-02-28T23:49:36.353Z", - "updatedAt": "2012-02-28T23:49:36.353Z", + "createdAt": "2022-01-01T12:23:45.678Z", + "updatedAt": "2022-01-01T12:23:45.678Z", "objectId": "uMz0YZeAqc", "sessionToken": "r:samplei3l83eerhnln0ecxgy5", "authData": { @@ -439,7 +439,7 @@ The body of the response will contain the `objectId`, `createdAt`, `sessionToken ```json { "username": "iwz8sna7sug28v4eyu7t89fij", - "createdAt": "2012-02-28T23:49:36.353Z", + "createdAt": "2022-01-01T12:23:45.678Z", "objectId": "uMz0YZeAqc", "sessionToken": "r:samplei3l83eerhnln0ecxgy5" } diff --git a/_includes/parse-server/Files-Migration.md b/_includes/parse-server/Files-Migration.md deleted file mode 100644 index bf8141753..000000000 --- a/_includes/parse-server/Files-Migration.md +++ /dev/null @@ -1,22 +0,0 @@ -In order of priority and easiest to hardest, here's how we should allow files migration from Hosted Parse to Parse Server: - -### In Flight - -Parse Server knows how to access and read existing files in Hosted Parse S3. We should make so that whenever we have a request for a file still on Hosted Parse, Parse Server downloads the original, uploads it to your file store and updates the reference in Mongo. This should probably be an option, maybe not everyone wants to allow this? - -* Pros: Easy to implement, seamless for users -* Cons: Small latency hit the first time we copy the file (although we could just redirect and defer the download), could potentially miss some never accessed files, how to deal with concurrency? - -### Mongo Objects - -Have a helper script in Parse Server that looks up the Schema places with known file objects, iterates over all those object and copies them to the new data store using the file adapter. 
- -* Pros: guaranties that all referenced files are copied, would allow you to migrate from one file adapter to another -* Cons: can be costly and take a long time to iterate on all Mongo objects for large apps, what to do for file objects within objects/arrays? - -### S3 Listing - -We could have an API on Hosted Parse that once authenticated, returns the list of all the files an app has in S3 by doing a listing on the bucket. - -* Pros: users control how they want to sync their files, ability to download all files even those deleted in Mongo -* Cons: need to implement some API on Hosted Parse for security, listing job can take a long time for apps with lots of files \ No newline at end of file diff --git a/_includes/parse-server/MongoRocks.md b/_includes/parse-server/MongoRocks.md deleted file mode 100644 index 873e3328e..000000000 --- a/_includes/parse-server/MongoRocks.md +++ /dev/null @@ -1,201 +0,0 @@ -# Using MongoDB + RocksDB - -## MongoRocks: What and Why? - -**Quick Version** - -Parse has been using MongoDB on RocksDB (MongoRocks) for application data since [April, 2015](http://blog.parse.com/announcements/mongodb-rocksdb-parse/). If you are migrating your Parse app(s) to your own MongoDB infrastructure, we recommend using MongoRocks to take advantage of the increased performance, greater efficiency, and powerful backup capabilities offered by the RocksDB storage engine. - -**Long Version** - -In version 3.0, MongoDB introduced the storage engine API to allow users an alternative to the default memory mapped (MMAP) storage engine used by earlier versions of MongoDB. In 2015, Facebook developed a RocksDB implementation of the storage engine API, MongoRocks, which is used by Parse for all customer data. RocksDB is an embeddable persistent key-value store developed by Facebook. It uses a [Log-structured Merge Tree](https://en.wikipedia.org/wiki/Log-structured_merge-tree) (LSM) for storage and is designed for high write throughput and storage efficiency. 
- -### Improved Performance and Efficiency - -When Parse switched from MMAP to MongoRocks, we discovered the following benefits in our [benchmarking](http://blog.parse.com/learn/engineering/mongodb-rocksdb-writing-so-fast-it-makes-your-head-spin/): - -- 50x increase in write performance -- 90% reduction in storage size -- significantly reduced latency on concurrent workloads due to reduced lock contention - -### Simple and efficient hot backups - -In addition to performance gains, a major advantage of MongoRocks (and RocksDB in general) is very efficient backups that do not require downtime. As detailed in this [blog post](http://blog.parse.com/learn/engineering/strata-open-source-library-for-efficient-mongodb-backups/), RocksDB backups can be taken on a live DB without interrupting service. RocksDB also supports incremental backups, reducing the I/O, network, and storage costs of doing backups and allowing backups to run more frequently. At Parse, we reduced DB infrastructure costs by more than 20% by using MongoRocks, the Strata backup tool, and Amazon S3 in place of MMAP and EBS Snapshots. - -### Are there any reasons not to use MongoRocks? - -Generally speaking, MongoRocks was suitable for running all app workloads at Parse. However, there are some workloads for which LSM are not ideal, and for which better performance may be achieved with other storage engines like MMAP or WiredTiger, such as: - -- Applications with high number of in-place updates or deletes. For example, a very busy work queue or heap. -- Applications with queries that scan many documents *and* fit entirely in memory. - -It's difficult to make precise statements about performance for any given workload without data. When in doubt, run your own benchmarks. You can use the [flashback](https://github.com/parseplatform/flashback) toolset to record and replay benchmarks based on live traffic. 
- -## Example: Provisioning on Ubuntu and AWS - -There are hundreds of ways to build out your infrastructure. For illustration we use an AWS and Ubuntu configuration similar to that used by Parse. You will need a set of AWS access keys and the AWS CLI. - -### Choosing Hardware - -At Parse, we use AWS i2.* (i/o optimized) class instances with ephemeral storage for running MongoRocks. Prior to this, when we used the MMAP storage engine, we used r3.* (memory optimized) instances with EBS PIOPS storage. Why the change? - -- RocksDB is designed to take full advantage of SSD storage. We also experienced large bursts of I/O for some workloads, and provisioning enough IOPS with EBS to support this was expensive. The ephemeral SSDs provided by the i2 class were ideal in our case. -- MongoRocks uses significantly more CPU than MMAP due to compression. CPU was never a major factor in MMAP. -- Memory is less critical in MongoRocks. Memory is everything in MMAP. -- EBS snapshots were critical to our backup strategy with MMAP. With MongoRocks, we had incremental backups with strata, so snapshots were not needed. - -If you're not sure about your workload requirements, we recommend running on the i2 class instances. You can always change this later depending on your production experience. - -Below is a general guide for instance sizing based on your existing Parse request traffic: - -- < 100 requests/sec: i2.xlarge -- 100-500 requests/sec: i2.2xlarge -- 500+ requests/sec: i2.4xlarge - -This guide will use i2.2xlarge as an example. - -### Provisioning - -We recommend you run MongoDB in replica set mode, with at least three nodes for availablity. Each node should run in a separate Availability Zone. - -There are dozens of ways to provision hosts in AWS. For reference, we use the AWS CLI below, but the inputs can be easily translated to your tool of choice. 
- -```sh -$ SECURITY_GROUP= -$ US_EAST_1A_SUBNET= -$ US_EAST_1C_SUBNET= -$ US_EAST_1D_SUBNET= -$ aws ec2 run-instances —image-id ami-fce3c696 --instance-type i2.2xlarge --key-name chef3 --block-device-mappings '[{"DeviceName": "/dev/sdb", "VirtualName": "ephemeral0"},{"DeviceName": "/dev/sdc", "VirtualName": "ephemeral1"}]' --security-group-ids ${SECURITY_GROUP} --subnet-id ${US_EAST_1A_SUBNET} --associate-public-ip-address -$ aws ec2 run-instances —image-id ami-fce3c696 --instance-type i2.2xlarge --key-name chef3 --block-device-mappings '[{"DeviceName": "/dev/sdb", "VirtualName": "ephemeral0"},{"DeviceName": "/dev/sdc", "VirtualName": "ephemeral1"}]' --security-group-ids ${SECURITY_GROUP} --subnet-id ${US_EAST_1D_SUBNET} --associate-public-ip-address -$ aws ec2 run-instances —image-id ami-fce3c696 --instance-type i2.2xlarge --key-name chef3 --block-device-mappings '[{"DeviceName": "/dev/sdb", "VirtualName": "ephemeral0"},{"DeviceName": "/dev/sdc", "VirtualName": "ephemeral1"}]' --security-group-ids ${SECURITY_GROUP} --subnet-id ${US_EAST_1D_SUBNET} --associate-public-ip-address -``` - -### Configuring Storage - -The i2.2xlarge and larger instances have multiple ephemeral volumes that should be striped together to produce your data volume. On each host, use **mdadm** to create the raid volume: - -```sh -$ sudo apt-get install mdadm -$ sudo mdadm —create /dev/md0 --level=stripe /dev/xvdb /dev/xvdc -$ sudo mkfs -t ext4 /dev/md0 -$ sudo mkdir -p /var/lib/mongodb -$ sudo mount /dev/md0 /var/lib/mongodb -``` - -## Installing MongoRocks - -To use MongoRocks, you will need to use a special build of MongoDB that has the storage engine compiled in. At Parse, we run an internally built version, as a pre-packaged version of MongoRocks did not exist when we initially migrated. For new installations, we recommend that you use the Percona builds located [here](https://www.percona.com/downloads/percona-server-mongodb/LATEST/). 
These builds are 100% feature compatible with the official MongoDB releases, but have been compiled to include the RocksDB storage engine. We have tested the Percona builds with the Parse migration utility and the strata backup software, and verified that both work and are suitable for running Parse apps in production. - -### Ubuntu installation - -```sh -$ curl -s -O https://www.percona.com/downloads/percona-server-mongodb/percona-server-mongodb-3.0.8-1.2/binary/debian/trusty/x86_64/percona-server-mongodb-3.0.8-1.2-r97f91ef-trusty-x86_64-bundle.tar -$ tar -xf percona-server-mongodb-3.0.8-1.2-r97f91ef-trusty-x86_64-bundle.tar -$ sudo dpkg -i percona-server-mongodb-* -``` - -### Configuration - -Configuring MongoDB to use the RocksDB storage engine is a matter of setting a few flags in the mongodb.conf file. For complete documentation of all MongoDB configuration options, visit the MongoDB reference page for [Configuration File Options](https://docs.mongodb.com/v3.0/reference/configuration-options/). - -First, set the storage engine parameter to instruct MongoDB to use the RocksDB storage engine. - -``` -storage: - dbPath: /var/lib/mongodb - journal: - enabled: true - engine: rocksdb -``` - -Next, some additional parameters. - -``` -# RockDB tuning parameters -# Yield if it's been at least this many milliseconds since we last yielded. -setParameter = internalQueryExecYieldPeriodMS=1000 -# Yield after this many "should yield?" checks. -setParameter = internalQueryExecYieldIterations=100000 -``` - -The adjustments to the internalQueryExecYield\* options reduce the frequency that MongoDB yields for writers. Since RocksDB has document level locking, frequent yielding is not necessary. - -### Startup - -When starting MongoRocks on a host for the very first time, your storage directory (e.g. /var/lib/mongodb) should be empty. If you have existing data from other storage engines (i.e. 
MMAP or WiredTiger), you should back up and remove those data files, as the storage formats are not compatible. - -## Backups - -### Installing strata - -Strata is written in go. It requires go 1.4 or later to compile. You can use apt or yum to install go, but these packages are frequently out of date on common distributions. To install a more recent version of go: - -```sh -$ curl https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz | sudo tar xzf - -C /usr/local -$ sudo mkdir /go -$ sudo chmod 0777 /go -``` - -You will need to add go to your *PATH* environment variable and set GOPATH. On ubuntu, this is as simple as: - -```sh -$ echo -e 'export PATH="/usr/local/go/bin:${PATH}:" \nexport GOPATH=/go' | sudo tee /etc/profile.d/gopath.sh -``` - -After logging in again, you can test that go is installed by running - -```sh -$ go version -go version go1.5.3 linux/amd64 -``` - -### Installing strata - -With go installed, compiling and installing strata is simply a matter of using `go install`: - -```sh -$ go get github.com/facebookgo/rocks-strata/strata/cmd/mongo/lreplica_s3storage_driver/strata -$ go install github.com/facebookgo/rocks-strata/strata/cmd/mongo/lreplica_s3storage_driver/strata -``` - -This installs the strata binary to `$GOPATH/bin/strata` - -### Configuring backups - -At Parse, we deployed strata using a simple distributed cron on all backup nodes. You can find a sample cron and and schedule [here](https://github.com/facebookgo/rocks-strata/blob/master/examples/backup/run.sh) in the rocks-strata repository. - -At a high level, the three things you want to do regularly when running backups with strata are: - -1. Run `strata backup` to create the actual backup. This stores the data files and backup metadata in S3, identified by a unique replica ID. Each host must have its own replica ID. For example, if your RS is named "mydata" and your host name is "db1", you might use "mydata-db1" as your replica ID. -2. 
Run `strata delete` to prune metadata for backups older than a certain date. The retention period that you specify is dependent on your needs. -3. Run `strata gc` to delete data files that are orphaned by `strata delete`. - -### Displaying backups - -You can view backup metadata at any time with `strata show backups`: - -For example, to see all backups for node *db1* in replica set *mydb*, you would run something like this: - -```sh -$ strata --bucket=mybucket --bucket-prefix=mongo-rocks show backups --replica-id=mydb-db1 - -ID data num files size (GB) incremental files incremental size duration -0 2015-09-02 21:11:20 UTC 4 0.000005 4 0.000005 187.929573ms -``` - -More documentation on strata, including how to restore backups, can be found [here](https://github.com/facebookgo/rocks-strata). - -## Migrating Existing Data to MongoRocks - -### Upgrading an existing replica set to MongoRocks - -The data files used by MMAP, WiredTiger, and RocksDB are not compatible. In other words, you cannot start MongoRocks using existing MMAP or Wiredtiger data. To change storage formats, you must do one of the following: - -1. Do a logical export and import using [mongodump](https://docs.mongodb.com/v3.0/reference/program/mongodump/) and [mongorestore](https://docs.mongodb.com/manual/reference/program/mongorestore/). -2. Perform an initial sync of data using replication - -Option 2 is the easiest, as you can bring a new, empty node online and add it to the replica set without incurring downtime. This approach usually works fine until your data size is in the hundreds of gigabytes. To do so: - -1. Provision a new node configured for RocksDB, following the above steps. -2. Add the node to your replica set using [rs.add()](https://docs.mongodb.com/v3.0/reference/method/rs.add/) -3. Wait for initial sync. Note that your data sync must complete before the oplog window expires. 
Depending on the size of your data, you may need to [resize your oplog](https://docs.mongodb.com/v3.0/tutorial/change-oplog-size/) diff --git a/_includes/parse-server/ParsePlatform.md b/_includes/parse-server/ParsePlatform.md deleted file mode 100644 index e7411aa68..000000000 --- a/_includes/parse-server/ParsePlatform.md +++ /dev/null @@ -1,37 +0,0 @@ -# Using MongoDB + RocksDB - -### Provisioning - -We recommend you run MongoDB in replica set mode, with at least three nodes for availablity. Each node should run in a separate Availability Zone. - -### Configuring Storage - - - Formatting data volumes with the XFS filesystem is strongly recommended. - -### Configuration - -#### MongoDB 3.0 - -Configuring MongoDB to use the WireTiger storage engine is a matter of setting a few flags in the mongodb.conf file. For complete documentation of all MongoDB configuration options, visit the MongoDB reference page for [Configuration File Options](https://docs.mongodb.com/v3.0/reference/configuration-options/). - -First, set the storage engine parameter to instruct MongoDB to use the WiredTiger storage engine. - -``` -storage: - dbPath: /var/lib/mongodb - journal: - enabled: true - engine: wiredTiger -``` - -### Startup - -When starting MongoDB with WiredTiger on a host for the very first time, your storage directory (e.g. /var/lib/mongodb) should be empty. If you have existing data from other storage engines (i.e. MMAP or MongoRocks), you should back up and remove those data files, as the storage formats are not compatible. - -## Backups - -If taking snapshots ensure that the journal lives on the same volume as the data files for WiredTiger. - -From the MongoDB Manual: - - > Snapshotting with the journal is only possible if the journal resides on the same volume as the data files, so that one snapshot operation captures the journal state and data file state atomically. 
diff --git a/_includes/parse-server/database.md b/_includes/parse-server/database.md index 739039d6b..851a3a0eb 100644 --- a/_includes/parse-server/database.md +++ b/_includes/parse-server/database.md @@ -8,25 +8,21 @@ The prefered database is MongoDB but Postgres is a great option if you're starti If you have not used MongoDB before, we highly recommend familiarizing yourself with it first before proceeding. -The Mongo requirements for Parse Server are: - -* MongoDB version 3.6 -* An SSL connection is recommended (but not required). - If this is your first time setting up a MongoDB instance, we recommend a Database-as-a-Service (DBaaS) like [MongoDB Atlas](https://www.mongodb.com/cloud/atlas) or [ObjectRocket](https://objectrocket.com/) which provide fully managed MongoDB instances and can help you scale as needed. +Ensure that the MongoDB version is compatible with your version of Parse Server, for details see the [compatibility table](https://github.com/parse-community/parse-server#compatibility) + When using MongoDB with your Parse app, you need to manage your indexes yourself. You will also need to size up your database as your data grows. -In order to allow for better scaling of your data layer, it is possible to direct queries to a mongodb secondary for read operations. See: [Mongo Read Preference](#using-mongodb-read-preference). +In order to allow for better scaling of your data layer, it is possible to direct queries to a MongoDB secondary for read operations. See: [MongoDB Read Preference](#using-mongodb-read-preference). ## Postgres -The Postgres requirements for Parse Server are: +Ensure that the Postgres version is compatible with your version of Parse Server, for details see the [compatibility table](https://github.com/parse-community/parse-server#compatibility) -* Postgres version 9.5 -* PostGIS extensions 2.3 +[PostGIS](https://postgis.net) is required if you plan to use geographic or location features. 
-The postgres database adapter will be automatically loaded when you pass a valid postgres URL, for example: `postgres://localhost:5432`. The available configuration options through the URL are: +The Postgres database adapter will be automatically loaded when you pass a valid Postgres URL, for example: `postgres://localhost:5432`. The available configuration options through the URL are: ``` postgres://localhost:5432/db?ssl=boolean&rejectUnauthorized=boolean&ca=/path/to/file&pfx=/path/to/file&cert=/path/to/file&key=/path/to/file&passphrase=string&secureOptions=number&client_encoding=string&application_name=string&fallback_application_name=string&max=number&query_timeout=idleTimeoutMillis=number&poolSize=number&binary=boolean&keepAlive=boolean @@ -44,8 +40,8 @@ Details about the configuration options can be found on [pg-promise](https://git * You will need to configure a [file adapter](#configuring-file-adapters) in order to store files. * Join tables are resolved in memory, there is no performance improvements using Postgres over MongoDB for relations or pointers. * Mutating the schema implies running ALTER TABLE, therefore we recommend you setup your schema when your tables are not full. 
-* The postgres URL for 4.2.0 and below only supports the following configuration options: +* The Postgres URL for Parse Server 4.2.0 and below only supports the following configuration options: - ``` - postgres://localhost:5432/db?ssl=boolean&client_encoding=string&application_name=string&fallback_application_name=string&poolSize=number&binary=boolean&keepAlive=boolean - ``` +``` +postgres://localhost:5432/db?ssl=boolean&client_encoding=string&application_name=string&fallback_application_name=string&poolSize=number&binary=boolean&keepAlive=boolean +``` diff --git a/_includes/parse-server/deploying-aws-ubuntu.md b/_includes/parse-server/deploying-aws-ubuntu.md new file mode 100644 index 000000000..2caf2c2f5 --- /dev/null +++ b/_includes/parse-server/deploying-aws-ubuntu.md @@ -0,0 +1,187 @@ +## Deploying on AWS EC2 Ubuntu using PostgreSQL + +Here are the steps: + +1. Log into your AWS account or create a new one [AWS Account](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/) +2. Go to AWS EC2 Dashboard +3. Launch Instances +4. On **Application and OS Images (Amazon Machine Image)** select `Ubuntu` +5. On **Instance Type** select `t2.micro` (it is OK for testing and small projects and is Free Tier eligible) +6. Create or select an existing `key pair`. (If you create a new one click `Download Key Pair`) +7. Click `Launch Instance` +8. On **EC2 Dashboard** select the newly created instance and click `Security` +9. On **Security Groups**, click on the security group +10. On **Inbound Rules** tab, click `Edit Inbound Rules` +11. Click `Add rule` and select `PostgreSQL` from the dropdown menu and `Anywhere-IPv4`. +12. Click `Save rules` +13. On **EC2 Dashboard** select the newly created instance and click `Connect` +14. 
Click `SSH Client` and follow the instructions + +Once logged into the ec2 instance we perform the following tasks: + +### Install Libraries and Dependencies +Update the local package manager `apt` +```bash +sudo apt update +``` +Install [NodeJS](https://nodejs.org) +```bash +sudo apt install nodejs +``` +Check the install was ok, you should see the version installed. +```bash +node -v +``` + + +Install [npm](https://www.npmjs.com) +```bash +sudo apt install npm +``` + + + +Install [yarn](https://yarnpkg.com) +```bash +sudo npm install yarn -g +``` + +### Install PostgreSQL Server + +Install [PostgreSQL](https://www.postgresql.org) +```bash +sudo apt-get -y install postgresql +``` +Once it is installed, create a password for the user `postgres` + +```bash +sudo su postgres +``` + +```bash +psql +``` + +```bash +ALTER USER postgres password 'myStrongPassword'; +``` + +Quit psql by typing `\q` + +Exit the postgres user by typing `exit` + +Navigate to the main folder inside postgresql/version/ +```bash +cd /etc/postgresql/14/main/ +``` +We need to edit two files, `pg_hba.conf` and `postgresql.conf` +```bash +sudo nano pg_hba.conf +``` +Scroll down the file and add `host, all, all, 0.0.0.0/0, md5`; it has to be the first line before `local, all, postgres, , peer` + +| TYPE | DATABASE | USER | ADDRESS | METHOD | +| ---- | -------- | ---- | ------- | ------ | +| host | all | all | 0.0.0.0/0 | md5 | +| local | all | postgres | | peer | + + +```bash +sudo nano postgresql.conf +``` +Search for `#listen_addresses='localhost'`, uncomment the line and replace `localhost` with `*` + +Restart the PostgreSQL server +```bash +sudo service postgresql restart +``` + +### Setup Parse Server + +Create a directory +```bash +cd ~ +``` +```bash +mkdir parse-server +``` +```bash +cd parse-server +``` + +Run the bash script and follow the instructions; the script has some visual issues and the key generation doesn't work. 
+```bash +sh <(curl -fsSL https://raw.githubusercontent.com/parse-community/parse-server/master/bootstrap.sh) +``` +After that, we need to set up the configuration file, use your own `appId`, `masterKey` and `clientKey`, use random strings or some generator tool to create secure keys. +```bash +sudo nano -w config.json +``` +These are the basic options of the config.json file, for the full list you can type `parse-server --help` or refer to the [full options document](https://parseplatform.org/parse-server/api/5.2.0/ParseServerOptions.html) for more details. +```js +{ + "appId": "exampleAppId", + "masterKey": "exampleMasterKey", + "clientKey": "exampleClientKey", + "appName": "MyApp", + "cloud": "./cloud/main", + "databaseURI": "postgres://postgres:myStrongPassword@localhost:5432/postgres" +} +``` + + +Install Parse Server globally + +```bash +sudo npm install -g parse-server +``` + +Start Parse Server using the script command in the config.json +```bash +npm start +``` +or manually with the nohup command and specifying the configuration file, this option will keep the server running even if you close the terminal +```bash +nohup parse-server config.json & +``` +Check if Parse Server is running by typing `http://<your-server-ip>:1337` in your browser's address bar, you should see `{"error":"unauthorized"}` + +### Setup Parse Dashboard + +Install Parse Dashboard globally +```bash +sudo npm install -g parse-dashboard +``` + +Once installed, you need to configure Parse Dashboard, go to `/usr/lib/node_modules/parse-dashboard/Parse-Dashboard/` and edit the file `parse-dashboard-config.json` +```bash +sudo nano -w parse-dashboard-config.json +``` +This is an example of parse-dashboard-config.json. 
+```js +{ + "apps": [{ + "serverURL": "http://example.com:1337/parse", + "appId": "exampleAppId", + "masterKey": "exampleMasterKey", + "allowInsecureHTTP": "true", + "appName": "MyApp" + }], + "users": [{ + "user": "admin", + "pass": "password" + }] +} +``` + +Start Parse Dashboard +```bash +parse-dashboard +``` +or with the nohup command and specifying the configuration file, this option will keep the dashboard running even if you close the terminal +```bash +nohup parse-dashboard --dev --config parse-dashboard-config.json & +``` + +Check if Parse Dashboard is running by typing `http://<your-server-ip>:4040` in your browser's address bar, you should see the login form, use the `user` and `pass` that you set in the `parse-dashboard-config.json` file. + diff --git a/_includes/parse-server/deploying.md b/_includes/parse-server/deploying.md index b36f33761..4924106ce 100644 --- a/_includes/parse-server/deploying.md +++ b/_includes/parse-server/deploying.md @@ -7,3 +7,5 @@ The fastest and easiest way to start using Parse Server is to run MongoDB and Pa {% include_relative _includes/parse-server/deploying-glitch-mlab.md %} {% include_relative _includes/parse-server/deploying-back4app.md %} + +{% include_relative _includes/parse-server/deploying-aws-ubuntu.md %} \ No newline at end of file diff --git a/_includes/parse-server/file-adapters.md b/_includes/parse-server/file-adapters.md index 9dbe826b7..336d31155 100644 --- a/_includes/parse-server/file-adapters.md +++ b/_includes/parse-server/file-adapters.md @@ -110,7 +110,6 @@ First you will create a bucket in S3 to hold these files. 8. Now select the **Policies** tab, then **Create Policy**. 9. Select **Create Your Own Policy**, fill out a **Policy Name**. 10. Copy the following config in **Policy Document**, changing **BUCKET_NAME** for the name of the bucket you created earlier. 
(note: this is a little more permissive than Parse Server needs, but it works for now) - ```js { "Version": "2012-10-17", diff --git a/_includes/parse-server/getting-started.md b/_includes/parse-server/getting-started.md index 87a58fd18..7080e9be3 100644 --- a/_includes/parse-server/getting-started.md +++ b/_includes/parse-server/getting-started.md @@ -6,20 +6,25 @@ Parse Server is an open source backend that can be deployed to any infrastructur * You can deploy and run Parse Server on your own infrastructure. * You can develop and test your app locally using Node. +The following guide describes how to set up Parse Server on your personal computer for local development. If you want to be able to access Parse Server from anywhere and make your app accessible publicly, you would want to deploy Parse Server to a cloud service provider like Amazon Web Services, Google Cloud, Microsoft Azure, Heroku or DigitalOcean. These providers vary in set-up complexity, configuration efforts, pricing model and required knowledge to secure your deployment. You can find guides for how to deploy Parse Server for specific providers in [Deploying Parse Server]({{ site.baseUrl }}/parse-server/guide/#deploying-parse-server) section. + +⚠️ Before making Parse Server accessible publicly, we strongly recommend to review all of your Parse Server configuration and read our [best practice]({{ site.baseUrl }}/parse-server/guide/#best-practice) guide. Failing to properly adapt your Parse Server configuration for a publicly accessible environment may make your deployment vulnerable to malicious intrusions, data leaks and unexpected cost increases. 
+ **Prerequisites** -* Node 8 or newer -* MongoDB version 3.6 -* Python 2.x (For Windows users, 2.7.1 is the required version) -* For deployment, an infrastructure provider like Heroku or AWS +* Node +* MongoDB (to use Parse Server with MongoDB) +* PostgreSQL (to use Parse Server with PostgreSQL) + +Ensure that the Node.js version is compatible with your version of Parse Server, for details see the [compatibility table](https://github.com/parse-community/parse-server#compatibility). The fastest and easiest way to get started is to run MongoDB and Parse Server locally. Use the bootstrap script to set up Parse Server in the current directory. ```bash -$ sh <(curl -fsSL https://raw.githubusercontent.com/parse-community/parse-server/master/bootstrap.sh) -$ npm install -g mongodb-runner -$ mongodb-runner start -$ npm start +sh <(curl -fsSL https://raw.githubusercontent.com/parse-community/parse-server/master/bootstrap.sh) +npm install -g mongodb-runner +mongodb-runner start +npm start ``` You can use any arbitrary string as your application id and master key. These will be used by your clients to authenticate with the Parse Server. @@ -43,14 +48,14 @@ You should get a response similar to this: ```js { "objectId": "2ntvSpRGIK", - "createdAt": "2016-03-11T23:51:48.050Z" + "createdAt": "2022-01-01T12:23:45.678Z" } ``` You can now retrieve this object directly (make sure to replace `2ntvSpRGIK` with the actual `objectId` you received when the object was created): ```bash -$ curl -X GET \ +curl -X GET \ -H "X-Parse-Application-Id: APPLICATION_ID" \ http://localhost:1337/parse/classes/GameScore/2ntvSpRGIK ``` @@ -62,15 +67,15 @@ $ curl -X GET \ "score": 123, "playerName": "Sean Plott", "cheatMode": false, - "updatedAt": "2016-03-11T23:51:48.050Z", - "createdAt": "2016-03-11T23:51:48.050Z" + "updatedAt": "2022-01-01T12:23:45.678Z", + "createdAt": "2022-01-01T12:23:45.678Z" } ``` Keeping tracks of individual object ids is not ideal, however. 
In most cases you will want to run a query over the collection, like so: -``` -$ curl -X GET \ +```bash +curl -X GET \ -H "X-Parse-Application-Id: APPLICATION_ID" \ http://localhost:1337/parse/classes/GameScore ``` @@ -84,8 +89,8 @@ $ curl -X GET \ "score": 123, "playerName": "Sean Plott", "cheatMode": false, - "updatedAt": "2016-03-11T23:51:48.050Z", - "createdAt": "2016-03-11T23:51:48.050Z" + "updatedAt": "2022-01-01T12:23:45.678Z", + "createdAt": "2022-01-01T12:23:45.678Z" } ] } diff --git a/_includes/parse-server/MongoReadPreference.md b/_includes/parse-server/mongo-read-preference.md similarity index 100% rename from _includes/parse-server/MongoReadPreference.md rename to _includes/parse-server/mongo-read-preference.md diff --git a/_includes/parse-server/push-notifications-ios.md b/_includes/parse-server/push-notifications-ios.md index e5b55251a..b82df42b9 100644 --- a/_includes/parse-server/push-notifications-ios.md +++ b/_includes/parse-server/push-notifications-ios.md @@ -13,10 +13,10 @@ application.registerForRemoteNotifications() ```objc // Objective-C UIUserNotificationType userNotificationTypes = (UIUserNotificationTypeAlert | - UIUserNotificationTypeBadge | - UIUserNotificationTypeSound); +UIUserNotificationTypeBadge | +UIUserNotificationTypeSound); UIUserNotificationSettings *settings = [UIUserNotificationSettings settingsForTypes:userNotificationTypes - categories:nil]; +categories:nil]; [application registerUserNotificationSettings:settings]; [application registerForRemoteNotifications]; ``` @@ -24,10 +24,10 @@ UIUserNotificationSettings *settings = [UIUserNotificationSettings settingsForTy ```csharp // Xamarin UIUserNotificationType notificationTypes = (UIUserNotificationType.Alert | - UIUserNotificationType.Badge | - UIUserNotificationType.Sound); +UIUserNotificationType.Badge | +UIUserNotificationType.Sound); var settings = UIUserNotificationSettings.GetSettingsForTypes(notificationTypes, - new NSSet(new string[] { })); +new NSSet(new string[] { 
})); UIApplication.SharedApplication.RegisterUserNotificationSettings(settings); UIApplication.SharedApplication.RegisterForRemoteNotifications(); @@ -62,38 +62,29 @@ func application(application: UIApplication, didReceiveRemoteNotification userIn ```objc // Objective-C -- (void)application:(UIApplication *)application didRegisterForRemoteNotificationsWithDeviceToken:(NSData *)deviceToken { - // Store the deviceToken in the current installation and save it to Parse. - PFInstallation *currentInstallation = [PFInstallation currentInstallation]; - [currentInstallation setDeviceTokenFromData:deviceToken]; - [currentInstallation saveInBackground]; -} - -- (void)application:(UIApplication *)application didReceiveRemoteNotification:(NSDictionary *)userInfo { - [PFPush handlePush:userInfo]; -} +UIUserNotificationType userNotificationTypes = (UIUserNotificationTypeAlert | + UIUserNotificationTypeBadge | + UIUserNotificationTypeSound); +UIUserNotificationSettings *settings = [UIUserNotificationSettings settingsForTypes:userNotificationTypes + categories:nil]; +[application registerUserNotificationSettings:settings]; +[application registerForRemoteNotifications]; ``` ```csharp // Xamarin -public override void DidRegisterUserNotificationSettings(UIApplication application, - UIUserNotificationSettings notificationSettings) { - application.RegisterForRemoteNotifications(); -} - -public override void RegisteredForRemoteNotifications(UIApplication application, - NSData deviceToken) { - ParseInstallation installation = ParseInstallation.CurrentInstallation; - installation.SetDeviceTokenFromData(deviceToken); - - installation.SaveAsync(); -} +UIUserNotificationType notificationTypes = (UIUserNotificationType.Alert | + UIUserNotificationType.Badge | + UIUserNotificationType.Sound); +var settings = UIUserNotificationSettings.GetSettingsForTypes(notificationTypes, + new NSSet(new string[] { })); +UIApplication.SharedApplication.RegisterUserNotificationSettings(settings); 
+UIApplication.SharedApplication.RegisterForRemoteNotifications(); -public override void ReceivedRemoteNotification(UIApplication application, - NSDictionary userInfo) { - // We need this to fire userInfo into ParsePushNotificationReceived. - ParsePush.HandlePush(userInfo); -} +// Handle Push Notifications +ParsePush.ParsePushNotificationReceived += (object sender, ParsePushNotificationEventArgs args) => { + // Process Push Notification payload here. +}; ``` ##### Compile and run! diff --git a/_includes/parse-server/push-notifications.md b/_includes/parse-server/push-notifications.md index 9aaac3725..26e8880a8 100644 --- a/_includes/parse-server/push-notifications.md +++ b/_includes/parse-server/push-notifications.md @@ -44,7 +44,7 @@ You will need to obtain some credentials from FCM and APNS in order to send push #### APNS (iOS) -If you are setting up push notifications on iOS, tvOS or macOS for the first time, we recommend you visit the [raywenderlich.com's Push Notifications tutorial](https://www.raywenderlich.com/123862/push-notifications-tutorial) or [appcoda.com's iOS Push tutorial](https://www.appcoda.com/push-notification-ios/) to help you obtain a production Apple Push Certificate. Parse Server supports the PFX (`.p12`) file exported from Keychain Access. Parse Server also supports the push certificate and key in `.pem` format. Token-based authentication instead of a certificate is supported as well. +If you are setting up push notifications on iOS, tvOS or macOS for the first time, we recommend you visit the [raywenderlich.com's Push Notifications tutorial](https://www.raywenderlich.com/11395893-push-notifications-tutorial-getting-started) or [appcoda.com's iOS Push tutorial](https://www.appcoda.com/push-notification-ios/) to help you obtain a production Apple Push Certificate. Parse Server supports the PFX (`.p12`) file exported from Keychain Access. Parse Server also supports the push certificate and key in `.pem` format. 
Token-based authentication instead of a certificate is supported as well. #### FCM (Android) @@ -239,7 +239,7 @@ In your Parse Server logs, you can see something similar to ```js // FCM request and response -{"request":{"params":{"priority":"normal","data":{"time":"2016-02-10T03:21:59.065Z","push_id":"NTDgWw7kp8","data":"{\"alert\":\"All work and no play makes Jack a dull boy.\"}"}}},"response":{"multicast_id":5318039027588186000,"success":1,"failure":0,"canonical_ids":0,"results":[{"registration_id":"APA91bEdLpZnXT76vpkvkD7uWXEAgfrZgkiH_ybkzXqhaNcRw1KHOY0s9GUKNgneGxe2PqJ5Swk1-Vf852kpHAP0Mhoj5wd1MVXpRsRr_3KTQo_dkNd_5wcQ__yWnWLxbeM3kg_JziJK","message_id":"0:1455074519347821%df0f8ea7f9fd7ecd"}]}} +{"request":{"params":{"priority":"normal","data":{"time":"2022-01-01T12:23:45.678Z","push_id":"NTDgWw7kp8","data":"{\"alert\":\"All work and no play makes Jack a dull boy.\"}"}}},"response":{"multicast_id":5318039027588186000,"success":1,"failure":0,"canonical_ids":0,"results":[{"registration_id":"APA91bEdLpZnXT76vpkvkD7uWXEAgfrZgkiH_ybkzXqhaNcRw1KHOY0s9GUKNgneGxe2PqJ5Swk1-Vf852kpHAP0Mhoj5wd1MVXpRsRr_3KTQo_dkNd_5wcQ__yWnWLxbeM3kg_JziJK","message_id":"0:1455074519347821%df0f8ea7f9fd7ecd"}]}} ``` ```sh diff --git a/_includes/parse-server/third-party-auth.md b/_includes/parse-server/third-party-auth.md index 99c4af095..9682e815b 100644 --- a/_includes/parse-server/third-party-auth.md +++ b/_includes/parse-server/third-party-auth.md @@ -3,17 +3,21 @@ Parse Server supports 3rd party authentication with * Apple +* Apple Game Center * Facebook * Github * Google +* Google Play Game Services * Instagram * Janrain Capture * Janrain Engage * Keycloak * LDAP +* Line * LinkedIn * Meetup * Microsoft Graph +* OAuth * PhantAuth * QQ * Spotify diff --git a/_includes/parse-server/usage.md b/_includes/parse-server/usage.md index ecb0e5bea..65e41db90 100644 --- a/_includes/parse-server/usage.md +++ b/_includes/parse-server/usage.md @@ -5,7 +5,7 @@ Parse Server is meant to be mounted on an 
[Express](http://expressjs.com/) app. The constructor returns an API object that conforms to an [Express Middleware](http://expressjs.com/en/api.html#app.use). This object provides the REST endpoints for a Parse app. Create an instance like so: ```js -var api = new ParseServer({ +const api = new ParseServer({ databaseURI: 'mongodb://your.mongo.uri', cloud: './cloud/main.js', appId: 'myAppId', @@ -18,7 +18,7 @@ var api = new ParseServer({ The parameters are as follows: -* `databaseURI`: Connection string URI for your MongoDB. +* `databaseURI`: Connection string for your database. * `cloud`: Path to your app’s Cloud Code. * `appId`: A unique identifier for your app. * `fileKey`: A key that specifies a prefix used for file storage. For migrated apps, this is necessary to provide access to files already hosted on Parse. @@ -35,16 +35,16 @@ The parameters are as follows: The Parse Server object was built to be passed directly into `app.use`, which will mount the Parse API at a specified path in your Express app: ```js -var express = require('express'); -var ParseServer = require('parse-server').ParseServer; +const express = require('express'); +const ParseServer = require('parse-server').ParseServer; -var app = express(); -var api = new ParseServer({ ... }); +const app = express(); +const api = new ParseServer({ ... 
}); // Serve the Parse API at /parse URL prefix app.use('/parse', api); -var port = 1337; +const port = 1337; app.listen(port, function() { console.log('parse-server-example running on port ' + port + '.'); }); diff --git a/_includes/php/objects.md b/_includes/php/objects.md index c4abe9d5d..0df1490cd 100644 --- a/_includes/php/objects.md +++ b/_includes/php/objects.md @@ -37,7 +37,7 @@ After this code runs, you will probably be wondering if anything really happened ```json objectId: "xWMyZ4YEGZ", score: 1337, playerName: "Sean Plott", cheatMode: false, -createdAt:"2011-06-10T18:33:42Z", updatedAt:"2011-06-10T18:33:42Z" +createdAt:"2022-01-01T12:23:45.678Z", updatedAt:"2022-01-01T12:23:45.678Z" ``` There are two things to note here. You didn't have to configure or set up a new Class called `GameScore` before running this code. Your Parse app lazily creates this Class for you when it first encounters it. diff --git a/_includes/rest/geopoints.md b/_includes/rest/geopoints.md index ede1baab7..a8bd05c1d 100644 --- a/_includes/rest/geopoints.md +++ b/_includes/rest/geopoints.md @@ -96,8 +96,8 @@ This will return a list of results ordered by distance from 30.0 latitude and -2 "__type": "GeoPoint", "longitude": -30.0 }, - "updatedAt": "2011-12-06T22:36:04.983Z", - "createdAt": "2011-12-06T22:36:04.983Z", + "updatedAt": "2022-01-01T12:23:45.678Z", + "createdAt": "2022-01-01T12:23:45.678Z", "objectId": "iFEPN5Gwoz" }, { @@ -106,8 +106,8 @@ This will return a list of results ordered by distance from 30.0 latitude and -2 "__type": "GeoPoint", "longitude": -20.0 }, - "updatedAt": "2011-12-06T22:36:26.143Z", - "createdAt": "2011-12-06T22:36:26.143Z", + "updatedAt": "2022-01-01T12:23:45.678Z", + "createdAt": "2022-01-01T12:23:45.678Z", "objectId": "LAyNKSNTHT" } ] diff --git a/_includes/rest/objects.md b/_includes/rest/objects.md index 15d46952e..678d1bdc2 100644 --- a/_includes/rest/objects.md +++ b/_includes/rest/objects.md @@ -25,8 +25,8 @@ When you retrieve objects from 
Parse, some fields are automatically added: `crea "score": 1337, "playerName": "Sean Plott", "cheatMode": false, - "createdAt": "2011-08-20T02:06:57.931Z", - "updatedAt": "2011-08-20T02:06:57.931Z", + "createdAt": "2022-01-01T12:23:45.678Z", + "updatedAt": "2022-01-01T12:23:45.678Z", "objectId": "Ed1nuqPvcm" } ``` @@ -94,7 +94,7 @@ The response body is a JSON object containing the `objectId` and the `createdAt` ```json { - "createdAt": "2011-08-20T02:06:57.931Z", + "createdAt": "2022-01-01T12:23:45.678Z", "objectId": "Ed1nuqPvcm" } ``` @@ -134,8 +134,8 @@ The response body is a JSON object containing all the user-provided fields, plus "pwnage", "flying" ], - "createdAt": "2011-08-20T02:06:57.931Z", - "updatedAt": "2011-08-20T02:06:57.931Z", + "createdAt": "2022-01-01T12:23:45.678Z", + "updatedAt": "2022-01-01T12:23:45.678Z", "objectId": "Ed1nuqPvcm" } ``` @@ -227,7 +227,7 @@ The response body is a JSON object containing just an `updatedAt` field with the ```json { - "updatedAt": "2011-08-21T18:02:52.248Z" + "updatedAt": "2022-01-01T12:23:45.678Z" } ``` @@ -533,7 +533,7 @@ The response from batch will be a list with the same number of elements as the i ```json { "success": { - "createdAt": "2012-06-15T16:59:11.276Z", + "createdAt": "2022-01-01T12:23:45.678Z", "objectId": "YAfSAWwXbL" } } @@ -625,7 +625,7 @@ The `Date` type contains a field `iso` which contains a UTC timestamp stored in ```json { "__type": "Date", - "iso": "2011-08-21T18:02:52.249Z" + "iso": "2022-01-01T12:23:45.678Z" } ``` @@ -637,7 +637,7 @@ curl -X GET \ -H "X-Parse-Application-Id: ${APPLICATION_ID}" \ -H "X-Parse-REST-API-Key: ${REST_API_KEY}" \ -G \ - --data-urlencode 'where={"createdAt":{"$gte":{"__type":"Date","iso":"2011-08-21T18:02:52.249Z"}}}' \ + --data-urlencode 'where={"createdAt":{"$gte":{"__type":"Date","iso":"2022-01-01T12:23:45.678Z"}}}' \ https://YOUR.PARSE-SERVER.HERE/parse/classes/GameScore

@@ -647,7 +647,7 @@ params = urllib.urlencode({"where":json.dumps({
        "createdAt": {
          "$gte": {
            "__type": "Date",
-           "iso": "2011-08-21T18:02:52.249Z"
+           "iso": "2022-01-01T12:23:45.678Z"
          }
        }
      })})
diff --git a/_includes/rest/push-notifications.md b/_includes/rest/push-notifications.md
index 22dc9c830..1a1fc0d43 100644
--- a/_includes/rest/push-notifications.md
+++ b/_includes/rest/push-notifications.md
@@ -73,7 +73,7 @@ The response body is a JSON object containing the `objectId` and the `createdAt`
 
 ```json
 {
-  "createdAt": "2012-04-28T17:41:09.106Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
   "objectId": "mrmBZvsErB"
 }
 ```
@@ -156,8 +156,8 @@ The response body is a JSON object containing all the user-provided fields, plus
   "channels": [
     ""
   ],
-  "createdAt": "2012-04-28T17:41:09.106Z",
-  "updatedAt": "2012-04-28T17:41:09.106Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "objectId": "mrmBZvsErB"
 }
 ```
@@ -242,8 +242,8 @@ The return value is a JSON object that contains a results field with a JSON arra
       "channels": [
         ""
       ],
-      "createdAt": "2012-04-28T17:41:09.106Z",
-      "updatedAt": "2012-04-28T17:41:09.106Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "objectId": "mrmBZvsErB"
     },
     {
@@ -252,8 +252,8 @@ The return value is a JSON object that contains a results field with a JSON arra
       "channels": [
         ""
       ],
-      "createdAt": "2012-04-30T01:52:57.975Z",
-      "updatedAt": "2012-04-30T01:52:57.975Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "objectId": "sGlvypFQcO"
     }
   ]
diff --git a/_includes/rest/queries.md b/_includes/rest/queries.md
index 12a4f1e4b..15c3a7261 100644
--- a/_includes/rest/queries.md
+++ b/_includes/rest/queries.md
@@ -31,17 +31,17 @@ The return value is a JSON object that contains a `results` field with a JSON ar
   "results": [
     {
       "playerName": "Jang Min Chul",
-      "updatedAt": "2011-08-19T02:24:17.787Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "cheatMode": false,
-      "createdAt": "2011-08-19T02:24:17.787Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
       "objectId": "A22v5zRAgd",
       "score": 80075
     },
     {
       "playerName": "Sean Plott",
-      "updatedAt": "2011-08-21T18:02:52.248Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "cheatMode": false,
-      "createdAt": "2011-08-20T02:06:57.931Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
       "objectId": "Ed1nuqPvcm",
       "score": 73453
     }
@@ -895,8 +895,8 @@ When the query is issued with an `include` parameter for the key holding this po
   "__type": "Object",
   "className": "Post",
   "objectId": "8TOXdXf3tz",
-  "createdAt": "2011-12-06T20:59:34.428Z",
-  "updatedAt": "2011-12-06T20:59:34.428Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "otherFields": "willAlsoBeIncluded"
 }
 ```
diff --git a/_includes/rest/roles.md b/_includes/rest/roles.md
index 096d23504..23012f48e 100644
--- a/_includes/rest/roles.md
+++ b/_includes/rest/roles.md
@@ -145,7 +145,7 @@ The response body is a JSON object containing the `objectId` and `createdAt` tim
 
 ```json
 {
-  "createdAt": "2012-04-28T17:41:09.106Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
   "objectId": "mrmBZvsErB"
 }
 ```
@@ -178,9 +178,9 @@ The response body is a JSON object containing all of the fields on the role:
 
 ```json
 {
-  "createdAt": "2012-04-28T17:41:09.106Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
   "objectId": "mrmBZvsErB",
-  "updatedAt": "2012-04-28T17:41:09.106Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "ACL": {
     "*": {
       "read": true
diff --git a/_includes/rest/users.md b/_includes/rest/users.md
index d67cf327d..cc0a23da9 100644
--- a/_includes/rest/users.md
+++ b/_includes/rest/users.md
@@ -52,7 +52,7 @@ The response body is a JSON object containing the `objectId`, the `createdAt` ti
 
 ```json
 {
-  "createdAt": "2011-11-07T20:58:34.448Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
   "objectId": "g7y9tkhB7O",
   "sessionToken": "r:pnktnjyb996sj4p156gjtp4im"
 }
@@ -97,8 +97,8 @@ The response body is a JSON object containing all the user-provided fields excep
 {
   "username": "cooldude6",
   "phone": "415-392-0202",
-  "createdAt": "2011-11-07T20:58:34.448Z",
-  "updatedAt": "2011-11-07T20:58:34.448Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "objectId": "g7y9tkhB7O",
   "sessionToken": "r:pnktnjyb996sj4p156gjtp4im"
 }
@@ -204,8 +204,8 @@ The response body is a JSON object containing all the user-provided fields excep
 {
   "username": "cooldude6",
   "phone": "415-392-0202",
-  "createdAt": "2011-11-07T20:58:34.448Z",
-  "updatedAt": "2011-11-07T20:58:34.448Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "objectId": "g7y9tkhB7O"
 }
 ```
@@ -284,7 +284,7 @@ The response body is a JSON object containing just an `updatedAt` field with the
 
 ```json
 {
-  "updatedAt": "2011-11-07T21:25:10.623Z"
+  "updatedAt": "2022-01-01T12:23:45.678Z"
 }
 ```
 
@@ -320,15 +320,15 @@ The return value is a JSON object that contains a `results` field with a JSON ar
     {
       "username": "bigglesworth",
       "phone": "650-253-0000",
-      "createdAt": "2011-11-07T20:58:06.445Z",
-      "updatedAt": "2011-11-07T20:58:06.445Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "objectId": "3KmCvT7Zsb"
     },
     {
       "username": "cooldude6",
       "phone": "415-369-6201",
-      "createdAt": "2011-11-07T20:58:34.448Z",
-      "updatedAt": "2011-11-07T21:25:10.623Z",
+      "createdAt": "2022-01-01T12:23:45.678Z",
+      "updatedAt": "2022-01-01T12:23:45.678Z",
       "objectId": "g7y9tkhB7O"
     }
   ]
@@ -473,8 +473,8 @@ With a response body like:
 ```json
 {
   "username": "Parse",
-  "createdAt": "2012-02-28T23:49:36.353Z",
-  "updatedAt": "2012-02-28T23:49:36.353Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
+  "updatedAt": "2022-01-01T12:23:45.678Z",
   "objectId": "uMz0YZeAqc",
   "sessionToken": "r:samplei3l83eerhnln0ecxgy5",
   "authData": {
@@ -502,7 +502,7 @@ The body of the response will contain the `objectId`, `createdAt`, `sessionToken
 ```json
 {
   "username": "iwz8sna7sug28v4eyu7t89fij",
-  "createdAt": "2012-02-28T23:49:36.353Z",
+  "createdAt": "2022-01-01T12:23:45.678Z",
   "objectId": "uMz0YZeAqc",
   "sessionToken": "r:samplei3l83eerhnln0ecxgy5"
 }
@@ -524,7 +524,7 @@ curl -X PUT \
           "facebook": {
             "id": "123456789",
             "access_token": "SaMpLeAAibS7Q55FSzcERWIEmzn6rosftAr7pmDME10008bWgyZAmv7mziwfacNOhWkgxDaBf8a2a2FCc9Hbk9wAsqLYZBLR995wxBvSGNoTrEaL",
-            "expiration_date": "2012-02-28T23:49:36.353Z"
+            "expiration_date": "2022-01-01T12:23:45.678Z"
           }
         }
       }' \
@@ -539,7 +539,7 @@ connection.request('PUT', '/parse/