diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..38e461d3e --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 +updates: + - package-ecosystem: cargo + directory: / + schedule: + interval: weekly + labels: + - "dependencies" + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + labels: + - "github actions" \ No newline at end of file diff --git a/.github/workflows/deploy.yml b/.github/workflows/book.yml similarity index 100% rename from .github/workflows/deploy.yml rename to .github/workflows/book.yml diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml new file mode 100644 index 000000000..506593cfe --- /dev/null +++ b/.github/workflows/main.yaml @@ -0,0 +1,115 @@ +name: Main +on: + push: + branches: + - main + paths-ignore: + - ".gitignore" + - ".github/dependabot.yml" + pull_request: + paths-ignore: + - ".gitignore" + - ".github/dependabot.yml" +env: + CARGO_TERM_COLOR: always +jobs: + format: + name: Format + runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@v4 + + - name: Cache crates + uses: Swatinem/rust-cache@v2 + + - name: Install Taplo + run: cargo install --locked taplo-cli + + - name: Format + run: | + cargo fmt --check + taplo fmt --check + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@v4 + + - name: Instal stable toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache crates + uses: Swatinem/rust-cache@v2 + + - name: Clippy + run: cargo clippy --no-deps --tests -- -D warnings + + - name: Rustdoc + run: cargo rustdoc -- -D warnings + + doctest: + name: Doctest + runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@v4 + + - name: Instal stable toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache crates + uses: Swatinem/rust-cache@v2 + + - name: Test doc + run: cargo test --doc + + test: + name: Test + 
runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@v4 + + - name: Instal stable toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache crates + uses: Swatinem/rust-cache@v2 + + - name: Install LLVM tools + run: rustup component add llvm-tools-preview + + - name: Install Tarpaulin + run: cargo install cargo-tarpaulin + + - name: Test + run: cargo tarpaulin --engine llvm --out lcov + + - name: Upload code coverage results + if: github.actor != 'dependabot[bot]' + uses: actions/upload-artifact@v3 + with: + name: code-coverage-report + path: lcov.info + + codecov: + name: Upload to Codecov + if: github.actor != 'dependabot[bot]' + needs: [format, lint, doctest, test] + runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@v4 + + - name: Download code coverage results + uses: actions/download-artifact@v3 + with: + name: code-coverage-report + + - name: Upload to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..2bb9ad240 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..6802bc4b8 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/NOTES.md b/NOTES.md new file mode 100644 index 000000000..5b65b04c4 --- /dev/null +++ b/NOTES.md @@ -0,0 +1,74 @@ +# Interesting links: + +* https://medium.com/@otukof/breaking-tradition-why-rust-might-be-your-best-first-language-d10afc482ac1 + - use local executors for async, and use one process/thread per core instead of doing multi-threading (more complicated and less performant + - one server: 1 game room per core? + +PROBLEMS: +- SYNC: + - sync only works if we send client updates every frame. Otherwise we need to take a LOT more margin on the server + to make sure that client packets arrive on time. -> MAKE SYNC COMPATIBLE WITH CLIENT UPDATE_INTERVAL (ADD UPDATE_INTERVAL TO MARGIN?) 
+ - SOMETHIGN PROBABLY BREAKS BECAUSE OF THE WRAPPING OF TICK AND WRAPPED-TIME, THINK ABOUT HOW IT WORKS +- if client 1 DC and then reconnects again, we don't get a new cube. +- it looks like disconnects events are not being received? + + +ROUGH EDGES: +- users cannot derive traits on ComponentProtocol or MessageProtocol because we add some extra variants to those enums +- the bitcode/Bytes parts are confusing and make extra copies +- users cannot specify how they serialize messages/components +- some slightly weird stuff around the sync manager +- can have smarter speedup/down for the sync system + + +- Prediction: + - TODO: handle despawns, spawns + - despawn another entity TODO: + - we let the user decide + - in some cases it's ok to let the created entity despawn + - in other cases we would like to only despawn that entity if confirm despawns it (for example a common object) + -> the user should write their systems so that despawns only happen on the confirmed timeline then + - spawn: TODO + i.e. we spawn something that depends on the predicted action (like a particle), but actually we rollback, + which means that we need to kill the spawned entity. + - either we kill immediately if it doesn't get spawned during rollback + - either we let it die naturally; either we fade it out? + -> same, the user should write their systems so that spawns only happen on the confirmed timeline + + - TODO: 2 ways to create predicted entities + - DONE: server-owned: server creates the confirmed entity, when client receives it, it creates a copy which is a predicted entity -> we have this one + - TODO: client-owned: client creates the predicted entity. 
It sends a message to client, which creates the confirmed entity however it wants + then when client receives the confirmed entity, it just updates the predicted entity to have a full mapping -> WE DONT HAVE THIS ONE YET + +- Replication: + - Fix the enable_replication flag, have a better way to enable/disable replication + - POSSIBLE TODO: send back messages about entity-actions having been received? (we get this for free with reliable channels, but we need to notify the replication manager) + +- Message Manager + - TODO: need to handle any messages/components that contain entity handles + - lookup bevy's entity-mapper + - TODO: run more extensive soak test + +- Packet Manager: + - TODO: Send Keepalive as part of Payload instead of KeepAlive + - so that we can receive ack bitfields frequently (ack bitfields needed for reliable channels not to resend) + - DISABLE NETCODE KEEP-ALIVE AND ROLL-OUT MY OWN WITH KEEPALIVE DATA TYPE! (this works because any packet received counts as keep alive) + - actually, don't need to disable netcode keep-alive, just send payload keep alive more frequently! + - or just prepare an ACK response whenever we receive anything from a reliable sender? (so the reliable sender gets a quick ack bitfield) + - TODO: Pick correct constant values for MTUs, etc. + - TODO: construct the final Packet from Bytes without using WriteBuffer and ReadBuffer, just concat Bytes to avoid having too many copies + +- Channels: + - TODO: add channel priority with accumulation. Some channels need infinite priority though (such as pings) + - TODO: add a tick buffer so that inputs from client arrive on the same corresponding tick in the server. + - in general the tick buffer can be used to associate an event with a tick, and make sure it is received on the same corresponding tick in remote + +- UI: + - TODO: UI that lets us see which packets are sent at every system update? + +- Metrics/Logs: + - add more metrics + - think more about log levels. 
Can we enable sub-level logs via filters? for example enable all prediction logs, etc. + +- Reflection: + - when can we use this? \ No newline at end of file diff --git a/README.md b/README.md index 5b65b04c4..60357b082 100644 --- a/README.md +++ b/README.md @@ -1,74 +1,7 @@ -# Interesting links: +# Lightyear -* https://medium.com/@otukof/breaking-tradition-why-rust-might-be-your-best-first-language-d10afc482ac1 - - use local executors for async, and use one process/thread per core instead of doing multi-threading (more complicated and less performant - - one server: 1 game room per core? +A library for writing server-authoritative multiplayer games with [Bevy](https://bevyengine.org/). -PROBLEMS: -- SYNC: - - sync only works if we send client updates every frame. Otherwise we need to take a LOT more margin on the server - to make sure that client packets arrive on time. -> MAKE SYNC COMPATIBLE WITH CLIENT UPDATE_INTERVAL (ADD UPDATE_INTERVAL TO MARGIN?) - - SOMETHIGN PROBABLY BREAKS BECAUSE OF THE WRAPPING OF TICK AND WRAPPED-TIME, THINK ABOUT HOW IT WORKS -- if client 1 DC and then reconnects again, we don't get a new cube. -- it looks like disconnects events are not being received? - -ROUGH EDGES: -- users cannot derive traits on ComponentProtocol or MessageProtocol because we add some extra variants to those enums -- the bitcode/Bytes parts are confusing and make extra copies -- users cannot specify how they serialize messages/components -- some slightly weird stuff around the sync manager -- can have smarter speedup/down for the sync system - - -- Prediction: - - TODO: handle despawns, spawns - - despawn another entity TODO: - - we let the user decide - - in some cases it's ok to let the created entity despawn - - in other cases we would like to only despawn that entity if confirm despawns it (for example a common object) - -> the user should write their systems so that despawns only happen on the confirmed timeline then - - spawn: TODO - i.e. 
we spawn something that depends on the predicted action (like a particle), but actually we rollback, - which means that we need to kill the spawned entity. - - either we kill immediately if it doesn't get spawned during rollback - - either we let it die naturally; either we fade it out? - -> same, the user should write their systems so that spawns only happen on the confirmed timeline - - - TODO: 2 ways to create predicted entities - - DONE: server-owned: server creates the confirmed entity, when client receives it, it creates a copy which is a predicted entity -> we have this one - - TODO: client-owned: client creates the predicted entity. It sends a message to client, which creates the confirmed entity however it wants - then when client receives the confirmed entity, it just updates the predicted entity to have a full mapping -> WE DONT HAVE THIS ONE YET - -- Replication: - - Fix the enable_replication flag, have a better way to enable/disable replication - - POSSIBLE TODO: send back messages about entity-actions having been received? (we get this for free with reliable channels, but we need to notify the replication manager) - -- Message Manager - - TODO: need to handle any messages/components that contain entity handles - - lookup bevy's entity-mapper - - TODO: run more extensive soak test - -- Packet Manager: - - TODO: Send Keepalive as part of Payload instead of KeepAlive - - so that we can receive ack bitfields frequently (ack bitfields needed for reliable channels not to resend) - - DISABLE NETCODE KEEP-ALIVE AND ROLL-OUT MY OWN WITH KEEPALIVE DATA TYPE! (this works because any packet received counts as keep alive) - - actually, don't need to disable netcode keep-alive, just send payload keep alive more frequently! - - or just prepare an ACK response whenever we receive anything from a reliable sender? (so the reliable sender gets a quick ack bitfield) - - TODO: Pick correct constant values for MTUs, etc. 
- - TODO: construct the final Packet from Bytes without using WriteBuffer and ReadBuffer, just concat Bytes to avoid having too many copies - -- Channels: - - TODO: add channel priority with accumulation. Some channels need infinite priority though (such as pings) - - TODO: add a tick buffer so that inputs from client arrive on the same corresponding tick in the server. - - in general the tick buffer can be used to associate an event with a tick, and make sure it is received on the same corresponding tick in remote - -- UI: - - TODO: UI that lets us see which packets are sent at every system update? - -- Metrics/Logs: - - add more metrics - - think more about log levels. Can we enable sub-level logs via filters? for example enable all prediction logs, etc. - -- Reflection: - - when can we use this? \ No newline at end of file +You can find more information in this WIP [book](https://cbournhonesque.github.io/lightyear/book/). +To quickly get started, you can look at this [tutorial](https://cbournhonesque.github.io/lightyear/book/tutorial/title.html). \ No newline at end of file diff --git a/book/src/introduction.md b/book/src/introduction.md index 2dd1ed02e..20b2c3f98 100644 --- a/book/src/introduction.md +++ b/book/src/introduction.md @@ -1,16 +1,14 @@ # Introduction +For a quick tutorial about how to use the crate, click [here](./tutorial/title.md). + ## What is lightyear? Lightyear is a networking library for games written in Bevy. - It uses a client-server networking architecture, where the server is authoritative over the game state. It is heavily inspired by [naia](https://github.com/naia-lib/naia). -It implements concepts from: -- GafferOnGames -- GDC overwatch and rocketleague talks ## What is this book about? @@ -24,12 +22,11 @@ It is more of a collection of notes and thoughts that I had while developing the I could come back to later to remember why I did things a certain way. - ## Who am I? -I am the main developer of the lightyear library. 
I don't have a lot of experience in Rust, and have never worked on game development. I picked up Bevy a couple years ago and got really interested in game-dev, specifically in networking. +I've started reading more networking resources such as [GafferOnGames](https://gafferongames.com/) or the Overwatch and RocketLeague GDC talks. I decided to write this crate to help me get better at Rust, get hands-on knowledge of networking for games, and of course to provide a useful library for the community. \ No newline at end of file diff --git a/shared/derive/src/component.rs b/shared/derive/src/component.rs index 0a2ea5107..5c2f02211 100644 --- a/shared/derive/src/component.rs +++ b/shared/derive/src/component.rs @@ -10,7 +10,7 @@ struct MacroAttrs { protocol: Ident, } -const ATTRIBUTES: &'static [&'static str] = &["sync"]; +const ATTRIBUTES: &[&str] = &["sync"]; #[derive(Debug, FromField)] #[darling(attributes(sync))] @@ -79,13 +79,13 @@ pub fn component_protocol_impl( let attr_args = match NestedMeta::parse_meta_list(args.into()) { Ok(v) => v, Err(e) => { - return TokenStream::from(Error::from(e).write_errors()).into(); + return Error::from(e).write_errors().into(); } }; let attr = match MacroAttrs::from_list(&attr_args) { Ok(v) => v, Err(e) => { - return TokenStream::from(e.write_errors()).into(); + return e.write_errors().into(); } }; let protocol = &attr.protocol; @@ -108,7 +108,7 @@ pub fn component_protocol_impl( let sync_fields: Vec = fields .iter() .filter(|field| field.attrs.iter().any(|attr| attr.path().is_ident("sync"))) - .map(|field| FromField::from_field(&field).unwrap()) + .map(|field| FromField::from_field(field).unwrap()) .collect(); for field in &sync_fields { field.check_is_valid(); @@ -222,11 +222,10 @@ fn strip_attributes(input: &ItemEnum) -> ItemEnum { for variant in input.variants.iter_mut() { // remove all attributes that are used in this macro variant.attrs.retain(|v| { - v.path().segments.first().map_or(true, |s| { - ATTRIBUTES - .iter() - 
.all(|attr| attr.to_string() != s.ident.to_string()) - }) + v.path() + .segments + .first() + .map_or(true, |s| ATTRIBUTES.iter().all(|attr| s.ident != *attr)) }) } input @@ -423,7 +422,7 @@ fn from_method(input: &ItemEnum, enum_kind_name: &Ident) -> TokenStream { } } -fn into_kind_method(input: &ItemEnum, fields: &Vec, enum_kind_name: &Ident) -> TokenStream { +fn into_kind_method(input: &ItemEnum, fields: &[Field], enum_kind_name: &Ident) -> TokenStream { let component_kind_names = input.variants.iter().map(|v| &v.ident); let component_types = fields.iter().map(|field| &field.ty); @@ -441,7 +440,7 @@ fn into_kind_method(input: &ItemEnum, fields: &Vec, enum_kind_name: &Iden field_body } -fn remove_method(input: &ItemEnum, fields: &Vec, enum_kind_name: &Ident) -> TokenStream { +fn remove_method(input: &ItemEnum, fields: &[Field], enum_kind_name: &Ident) -> TokenStream { let component_kind_names = input.variants.iter().map(|v| &v.ident); let component_types = fields.iter().map(|field| &field.ty); diff --git a/shared/derive/src/message.rs b/shared/derive/src/message.rs index 436fb8859..78fb2e69b 100644 --- a/shared/derive/src/message.rs +++ b/shared/derive/src/message.rs @@ -56,13 +56,13 @@ pub fn message_protocol_impl( let attr_args = match NestedMeta::parse_meta_list(args.into()) { Ok(v) => v, Err(e) => { - return TokenStream::from(Error::from(e).write_errors()).into(); + return Error::from(e).write_errors().into(); } }; let attr = match MacroAttrs::from_list(&attr_args) { Ok(v) => v, Err(e) => { - return TokenStream::from(e.write_errors()).into(); + return e.write_errors().into(); } }; let protocol = &attr.protocol; @@ -91,7 +91,7 @@ pub fn message_protocol_impl( let encode_method = encode_method(); let decode_method = decode_method(); - let from_into_methods = from_into_methods(&input, &fields, &enum_name); + let from_into_methods = from_into_methods(&input, &fields, enum_name); let output = quote! 
{ #[doc(hidden)] @@ -193,7 +193,7 @@ fn name_method(input: &ItemEnum) -> TokenStream { } } -fn from_into_methods(input: &ItemEnum, fields: &Vec<&Field>, enum_name: &Ident) -> TokenStream { +fn from_into_methods(input: &ItemEnum, fields: &[&Field], enum_name: &Ident) -> TokenStream { let enum_name = &input.ident; let variants = input.variants.iter().map(|v| v.ident.clone()); let mut body = quote! {}; diff --git a/shared/src/channel/senders/fragment_sender.rs b/shared/src/channel/senders/fragment_sender.rs index 8123f3f2d..49a70a7ed 100644 --- a/shared/src/channel/senders/fragment_sender.rs +++ b/shared/src/channel/senders/fragment_sender.rs @@ -64,7 +64,7 @@ mod tests { let expected_num_fragments = 3; assert_eq!(fragments.len(), expected_num_fragments); assert_eq!( - fragments.get(0).unwrap(), + fragments.first().unwrap(), &FragmentData { message_id, tick: None, diff --git a/shared/src/channel/senders/reliable.rs b/shared/src/channel/senders/reliable.rs index 3cd0f5043..b2c9963b6 100644 --- a/shared/src/channel/senders/reliable.rs +++ b/shared/src/channel/senders/reliable.rs @@ -256,7 +256,7 @@ mod tests { // Buffer a new message let mut message1 = Bytes::from("hello"); - sender.buffer_send(message1.clone().into()); + sender.buffer_send(message1.clone()); assert_eq!(sender.unacked_messages.len(), 1); assert_eq!(sender.next_send_message_id, MessageId(1)); // Collect the messages to be sent @@ -273,7 +273,7 @@ mod tests { sender.collect_messages_to_send(); assert_eq!(sender.single_messages_to_send.len(), 1); assert_eq!( - sender.single_messages_to_send.get(0).unwrap(), + sender.single_messages_to_send.front().unwrap(), &SingleData::new(Some(MessageId(0)), message1.clone()) ); diff --git a/shared/src/client/prediction/mod.rs b/shared/src/client/prediction/mod.rs index 12d9ebb27..06a89c469 100644 --- a/shared/src/client/prediction/mod.rs +++ b/shared/src/client/prediction/mod.rs @@ -77,7 +77,7 @@ pub fn spawn_predicted_entity( ) { for (confirmed_entity, mut confirmed) 
in confirmed_entities.iter_mut() { // spawn a new predicted entity - let predicted_entity_mut = commands.spawn((Predicted { confirmed_entity })); + let predicted_entity_mut = commands.spawn(Predicted { confirmed_entity }); let predicted_entity = predicted_entity_mut.id(); // add Confirmed to the confirmed entity @@ -86,12 +86,10 @@ pub fn spawn_predicted_entity( if let Some(mut confirmed) = confirmed { confirmed.predicted = Some(predicted_entity); } else { - confirmed_entity_mut.insert( - (Confirmed { - predicted: Some(predicted_entity), - interpolated: None, - }), - ); + confirmed_entity_mut.insert(Confirmed { + predicted: Some(predicted_entity), + interpolated: None, + }); } info!( "Spawn predicted entity {:?} for confirmed: {:?}", diff --git a/shared/src/client/prediction/predicted_history.rs b/shared/src/client/prediction/predicted_history.rs index 26365aa43..d0e00b67a 100644 --- a/shared/src/client/prediction/predicted_history.rs +++ b/shared/src/client/prediction/predicted_history.rs @@ -98,42 +98,6 @@ impl PredictionHistory { // } } -#[cfg(test)] -mod tests { - // use super::*; - // - // #[derive(Component, Clone, PartialEq, Eq, Debug)] - // pub struct A(u32); - // - // #[test] - // fn test_component_history() { - // let mut component_history = ComponentHistory::new(); - // - // // check when we try to access a value when the buffer is empty - // assert_eq!(component_history.get_history_at_tick(Tick(0)), None); - // - // // check when we try to access an exact tick - // component_history.buffer.add_item(Tick(1), A(1)); - // component_history.buffer.add_item(Tick(2), A(2)); - // assert_eq!(component_history.get_history_at_tick(Tick(2)), Some(A(2))); - // // check that we cleared older ticks - // assert!(component_history.buffer.is_empty()); - // - // // check when we try to access a value in-between ticks - // component_history.buffer.add_item(Tick(1), A(1)); - // component_history.buffer.add_item(Tick(3), A(3)); - // 
assert_eq!(component_history.get_history_at_tick(Tick(2)), Some(A(1))); - // assert_eq!(component_history.buffer.len(), 1); - // assert_eq!(component_history.get_history_at_tick(Tick(4)), Some(A(3))); - // assert!(component_history.buffer.is_empty()); - // - // // check when we try to access a value before any ticks - // component_history.buffer.add_item(Tick(1), A(1)); - // assert_eq!(component_history.get_history_at_tick(Tick(0)), None); - // assert_eq!(component_history.buffer.len(), 1); - // } -} - // This system: // - when we receive a confirmed entity, we will create a predicted entity // - when that predicted entity is created, we need to copy all components from the confirmed entity to the predicted entity, and create ComponentHistories @@ -303,3 +267,39 @@ pub(crate) fn apply_confirmed_update( } } } + +#[cfg(test)] +mod tests { + // use super::*; + // + // #[derive(Component, Clone, PartialEq, Eq, Debug)] + // pub struct A(u32); + // + // #[test] + // fn test_component_history() { + // let mut component_history = ComponentHistory::new(); + // + // // check when we try to access a value when the buffer is empty + // assert_eq!(component_history.get_history_at_tick(Tick(0)), None); + // + // // check when we try to access an exact tick + // component_history.buffer.add_item(Tick(1), A(1)); + // component_history.buffer.add_item(Tick(2), A(2)); + // assert_eq!(component_history.get_history_at_tick(Tick(2)), Some(A(2))); + // // check that we cleared older ticks + // assert!(component_history.buffer.is_empty()); + // + // // check when we try to access a value in-between ticks + // component_history.buffer.add_item(Tick(1), A(1)); + // component_history.buffer.add_item(Tick(3), A(3)); + // assert_eq!(component_history.get_history_at_tick(Tick(2)), Some(A(1))); + // assert_eq!(component_history.buffer.len(), 1); + // assert_eq!(component_history.get_history_at_tick(Tick(4)), Some(A(3))); + // assert!(component_history.buffer.is_empty()); + // + // // check when 
we try to access a value before any ticks + // component_history.buffer.add_item(Tick(1), A(1)); + // assert_eq!(component_history.get_history_at_tick(Tick(0)), None); + // assert_eq!(component_history.buffer.len(), 1); + // } +} diff --git a/shared/src/connection/events.rs b/shared/src/connection/events.rs index 74178d67e..5b3e50b52 100644 --- a/shared/src/connection/events.rs +++ b/shared/src/connection/events.rs @@ -186,7 +186,7 @@ impl IterMessageEvent

for ConnectionEvents

{ }) })); } - return Box::new(iter::empty()); + Box::new(iter::empty()) } fn has_messages(&self) -> bool { @@ -249,7 +249,7 @@ impl IterComponentUpdateEvent

for ConnectionEvents

{ if let Some(data) = self.component_updates.remove(&component_kind) { return Box::new(data.into_iter().map(|entity| (entity, ()))); } - return Box::new(iter::empty()); + Box::new(iter::empty()) } fn has_component_update(&self) -> bool @@ -283,7 +283,7 @@ impl IterComponentRemoveEvent

for ConnectionEvents

{ if let Some(data) = self.component_removes.remove(&component_kind) { return Box::new(data.into_iter().map(|entity| (entity, ()))); } - return Box::new(iter::empty()); + Box::new(iter::empty()) } fn has_component_remove(&self) -> bool @@ -317,7 +317,7 @@ impl IterComponentInsertEvent

for ConnectionEvents

{ if let Some(data) = self.component_inserts.remove(&component_kind) { return Box::new(data.into_iter().map(|entity| (entity, ()))); } - return Box::new(iter::empty()); + Box::new(iter::empty()) } fn has_component_insert(&self) -> bool diff --git a/shared/src/connection/mod.rs b/shared/src/connection/mod.rs index b21a50f36..3713029a5 100644 --- a/shared/src/connection/mod.rs +++ b/shared/src/connection/mod.rs @@ -1,8 +1,8 @@ -/// Defines a [`ConnectionEvents`] struct that is used to store all events that are received from a [`Connection`] +/// Defines a [`ConnectionEvents`](ConnectionEvents) struct that is used to store all events that are received from a [`Connection`](Connection) // only public for proc macro pub mod events; -/// Provides a [`ProtocolMessage`] enum that is a wrapper around all the possible messages that can be sent over the network +/// Provides a [`ProtocolMessage`](ProtocolMessage) enum that is a wrapper around all the possible messages that can be sent over the network pub(crate) mod message; use crate::_reexport::PingChannel; @@ -33,8 +33,7 @@ use crate::utils::named::Named; // Also it would be inefficient because we would send separate packets for messages or replications, even though // we can put them in the same packet -/// Wrapper to: send/receive messages via channels to a remote address -/// By splitting the data into packets and sending them through a given transport +/// Wrapper to send/receive messages via channels to a remote address pub struct Connection { pub ping_manager: PingManager, pub message_manager: MessageManager>, diff --git a/shared/src/netcode/server.rs b/shared/src/netcode/server.rs index 1aba09297..15c2cdde9 100644 --- a/shared/src/netcode/server.rs +++ b/shared/src/netcode/server.rs @@ -887,7 +887,7 @@ impl Server { self.conn_cache .clients .iter() - .filter_map(|(id, c)| c.is_connected().then_some(id.clone())) + .filter_map(|(id, c)| c.is_connected().then_some(*id)) } pub fn client_ids(&self) -> impl Iterator + '_ 
{ diff --git a/shared/src/packet/header.rs b/shared/src/packet/header.rs index 4206e7e1f..108d26244 100644 --- a/shared/src/packet/header.rs +++ b/shared/src/packet/header.rs @@ -238,8 +238,8 @@ impl ReceiveBuffer { // iter goes from the item pushed the longest ago (to the left of the bitfield) // to the items pushed most recently (to the right of the bitfield) - for (_i, &exists) in self.buffer.iter().enumerate() { - if exists { + for exists in self.buffer.iter() { + if *exists { ack_bitfield |= mask; } mask >>= 1; @@ -299,13 +299,13 @@ mod tests { // receive a packet at the max far ahead // diff == ACK_BITFIELD_SIZE - let mut recv_buffer = add_most_recent_packet(recv_buffer, 82, 1 << 32 - 1); + let mut recv_buffer = add_most_recent_packet(recv_buffer, 82, 1 << (32 - 1)); // receive a packet that is too far in the past // diff_id < -ACK_BITFIELD_SIZE recv_buffer.recv_packet(PacketId(49)); assert_eq!(recv_buffer.last_recv_packet_id, Some(PacketId(82))); - assert_eq!(recv_buffer.get_bitfield(), 1 << 32 - 1); + assert_eq!(recv_buffer.get_bitfield(), 1 << (32 - 1)); } #[test] diff --git a/shared/src/packet/message.rs b/shared/src/packet/message.rs index 69647c5c1..5a848af54 100644 --- a/shared/src/packet/message.rs +++ b/shared/src/packet/message.rs @@ -256,8 +256,7 @@ impl FragmentData { let tick = reader.decode::>(Fixed)?; let fragment_id = reader.decode::(Gamma)?; let num_fragments = reader.decode::(Gamma)?; - let bytes: Bytes; - if fragment_id == num_fragments - 1 { + let bytes = if fragment_id == num_fragments - 1 { // let num_bytes = reader.decode::(Gamma)?; // let num_bytes_non_zero = std::num::NonZeroUsize::new(num_bytes) // .ok_or_else(|| anyhow::anyhow!("num_bytes is 0"))?; @@ -267,14 +266,14 @@ impl FragmentData { // TODO: avoid the extra copy // - maybe have the encoding of bytes be let read_bytes = reader.decode::>(Fixed)?; - bytes = Bytes::from(read_bytes); + Bytes::from(read_bytes) } else { // Serde does not handle arrays well 
(https://github.com/serde-rs/serde/issues/573) let read_bytes = reader.decode::<[u8; FRAGMENT_SIZE]>(Fixed)?; // TODO: avoid the extra copy let bytes_vec: Vec = read_bytes.to_vec(); - bytes = Bytes::from(bytes_vec); - } + Bytes::from(bytes_vec) + }; Ok(Self { message_id, tick, @@ -388,7 +387,7 @@ mod tests { // dbg!(a); let bytes = writer.finish_write(); - let mut reader = ReadWordBuffer::start_read(bytes.as_ref()); + let mut reader = ReadWordBuffer::start_read(bytes); let decoded = SingleData::decode(&mut reader).unwrap(); // dbg!(bitvec::vec::BitVec::::from_slice(&bytes)); @@ -416,7 +415,7 @@ mod tests { // dbg!(a); let bytes = writer.finish_write(); - let mut reader = ReadWordBuffer::start_read(bytes.as_ref()); + let mut reader = ReadWordBuffer::start_read(bytes); let decoded = FragmentData::decode(&mut reader).unwrap(); // dbg!(bitvec::vec::BitVec::::from_slice(&bytes)); diff --git a/shared/src/packet/message_manager.rs b/shared/src/packet/message_manager.rs index 891688394..772c3eeed 100644 --- a/shared/src/packet/message_manager.rs +++ b/shared/src/packet/message_manager.rs @@ -282,7 +282,7 @@ mod tests { MessageManager::::new(&channel_registry); // client: buffer send messages, and then send - let mut message = MyMessageProtocol::Message1(Message1(1)); + let message = MyMessageProtocol::Message1(Message1(1)); let channel_kind_1 = ChannelKind::of::(); let channel_kind_2 = ChannelKind::of::(); client_message_manager.buffer_send(message.clone(), channel_kind_1)?; @@ -293,7 +293,7 @@ mod tests { HashMap::from([( PacketId(0), HashMap::from([( - channel_kind_1.clone(), + channel_kind_1, vec![MessageAck { message_id: MessageId(0), fragment_id: None, @@ -303,9 +303,9 @@ mod tests { ); // server: receive bytes from the sent messages, then process them into messages - for mut packet_byte in packet_bytes.iter_mut() { + for packet_byte in packet_bytes.iter_mut() { server_message_manager - .recv_packet(&mut ReadWordBuffer::start_read(&packet_byte.as_slice()))?; + 
.recv_packet(&mut ReadWordBuffer::start_read(packet_byte.as_slice()))?; } let mut data = server_message_manager.read_messages(); assert_eq!(data.get(&channel_kind_1).unwrap(), &vec![message.clone()]); @@ -336,7 +336,7 @@ mod tests { // On client side: keep looping to receive bytes on the network, then process them into messages for mut packet_byte in packet_bytes.iter_mut() { client_message_manager - .recv_packet(&mut ReadWordBuffer::start_read(&packet_byte.as_slice()))?; + .recv_packet(&mut ReadWordBuffer::start_read(packet_byte.as_slice()))?; } // Check that reliability works correctly @@ -373,7 +373,7 @@ mod tests { // client: buffer send messages, and then send let message_size = (1.5 * FRAGMENT_SIZE as f32) as usize; - let mut message = MyMessageProtocol::Message2(Message2(vec![1; message_size])); + let message = MyMessageProtocol::Message2(Message2(vec![1; message_size])); let channel_kind_1 = ChannelKind::of::(); let channel_kind_2 = ChannelKind::of::(); client_message_manager.buffer_send(message.clone(), channel_kind_1)?; @@ -386,7 +386,7 @@ mod tests { ( PacketId(0), HashMap::from([( - channel_kind_1.clone(), + channel_kind_1, vec![MessageAck { message_id: MessageId(0), fragment_id: Some(0), @@ -396,7 +396,7 @@ mod tests { ( PacketId(1), HashMap::from([( - channel_kind_1.clone(), + channel_kind_1, vec![MessageAck { message_id: MessageId(0), fragment_id: Some(1), @@ -407,9 +407,9 @@ mod tests { ); // server: receive bytes from the sent messages, then process them into messages - for mut packet_byte in packet_bytes.iter_mut() { + for packet_byte in packet_bytes.iter_mut() { server_message_manager - .recv_packet(&mut ReadWordBuffer::start_read(&packet_byte.as_slice()))?; + .recv_packet(&mut ReadWordBuffer::start_read(packet_byte.as_slice()))?; } let mut data = server_message_manager.read_messages(); assert_eq!(data.get(&channel_kind_1).unwrap(), &vec![message.clone()]); @@ -444,9 +444,9 @@ mod tests { let mut packet_bytes = 
server_message_manager.send_packets(Tick(0))?; // On client side: keep looping to receive bytes on the network, then process them into messages - for mut packet_byte in packet_bytes.iter_mut() { + for packet_byte in packet_bytes.iter_mut() { client_message_manager - .recv_packet(&mut ReadWordBuffer::start_read(&packet_byte.as_slice()))?; + .recv_packet(&mut ReadWordBuffer::start_read(packet_byte.as_slice()))?; } // Check that reliability works correctly diff --git a/shared/src/packet/mod.rs b/shared/src/packet/mod.rs index 0def794e4..253bad87a 100644 --- a/shared/src/packet/mod.rs +++ b/shared/src/packet/mod.rs @@ -1,30 +1,34 @@ //! # Packet -//! This module defines the concept of a [`packet::Packet`], which is a byte array that will be sent over the network. -//! A [`packet::Packet`] has a maximum size that depends on the transport (around 1400 bytes for UDP), and is +//! This module defines the concept of a [`packet`] which is a byte array that will be sent over the network. +//! A [`packet`] has a maximum size that depends on the transport (around 1400 bytes for UDP), and is //! composed of a header and a payload. //! //! The header will compute important information such as the packet sequence number, the packet type, etc. //! as well as information to handle the ack system. //! //! The payload is a list of messages that are included in the packet. Messages will be included in the packet -//! in order of [`crate::channel::builder::Channel`] priority. +//! in order of [`channel`] priority. //! -//! Packets that are over the maximum packet size will be fragmented into multiple [`packet::FragmentedPacket`]. +//! Packets that are over the maximum packet size will be fragmented into multiple [`FragmentedPacket`]. +//! +//! [`packet`]: crate::packet::packet::Packet +//! [`channel`]: crate::channel::builder::Channel +//! 
[`FragmentedPacket`]: crate::packet::packet::FragmentedPacket -/// Manages the [`header::PacketHeader`] which includes important packet information +/// Manages the [`PacketHeader`](crate::packet::header::PacketHeader) which includes important packet information pub mod header; -/// Defines the [`message::Message`] struct, which is a piece of serializable data +/// Defines the [`Message`](message::Message) struct, which is a piece of serializable data pub mod message; -/// Manages sending and receiving [`packet::Packet`]s over the network +/// Manages sending and receiving [`Packets`](crate::packet::packet::Packet) over the network pub mod message_manager; -/// Defines the [`packet::Packet`] struct +/// Defines the [`Packet`](crate::packet::packet::Packet) struct pub mod packet; -/// Manages building a single [`packet::Packet`] from multiple [`message::Message`]s +/// Manages building a single [`Packet`](crate::packet::packet::Packet) from multiple [`Messages`](crate::packet::message::Message) pub(crate) mod packet_manager; -/// Defines the [`packet::PacketType`] enum +/// Defines the [`PacketType`](crate::packet::packet_type::PacketType) enum mod packet_type; diff --git a/shared/src/packet/packet.rs b/shared/src/packet/packet.rs index 1907c71fc..b518ad42c 100644 --- a/shared/src/packet/packet.rs +++ b/shared/src/packet/packet.rs @@ -98,7 +98,7 @@ impl SinglePacket { #[cfg(test)] pub fn num_messages(&self) -> usize { - self.data.iter().map(|(_, messages)| messages.len()).sum() + self.data.values().map(|messages| messages.len()).sum() } } @@ -426,9 +426,9 @@ mod tests { let message2 = SingleData::new(None, Bytes::from("world")); let message3 = SingleData::new(None, Bytes::from("!")); - packet.add_message(0, message1.clone().into()); - packet.add_message(0, message2.clone().into()); - packet.add_message(1, message3.clone().into()); + packet.add_message(0, message1.clone()); + packet.add_message(0, message2.clone()); + packet.add_message(1, message3.clone()); // add a 
channel with no messages packet.add_channel(2); @@ -495,9 +495,9 @@ mod tests { let message2 = SingleData::new(None, Bytes::from("world")); let message3 = SingleData::new(None, Bytes::from("!")); - packet.packet.add_message(0, message1.clone().into()); - packet.packet.add_message(0, message2.clone().into()); - packet.packet.add_message(1, message3.clone().into()); + packet.packet.add_message(0, message1.clone()); + packet.packet.add_message(0, message2.clone()); + packet.packet.add_message(1, message3.clone()); // add a channel with no messages packet.packet.add_channel(2); diff --git a/shared/src/packet/packet_manager.rs b/shared/src/packet/packet_manager.rs index 9d34a9789..2e7f32964 100644 --- a/shared/src/packet/packet_manager.rs +++ b/shared/src/packet/packet_manager.rs @@ -384,9 +384,9 @@ impl PacketManager { 'packet: loop { // Can we write the channel id? If not, start a new packet (and add the channel id) if single_packet.is_none() - || single_packet.as_mut().is_some_and(|mut p| { - !self.can_add_channel_to_packet(&channel_id, &mut p).unwrap() - }) + || single_packet + .as_mut() + .is_some_and(|p| !self.can_add_channel_to_packet(&channel_id, p).unwrap()) { let mut packet = self.build_new_single_packet(); // single_packet = Some(self.build_new_single_packet()); @@ -657,29 +657,14 @@ mod tests { let small_message = Bytes::from("hello"); let mut packet = manager.build_new_single_packet(); - assert_eq!( - manager.can_add_channel_to_packet(channel_id, &mut packet)?, - true - ); + assert!(manager.can_add_channel_to_packet(channel_id, &mut packet)?,); - assert_eq!( - manager.can_add_bits(small_message.len() * (u8::BITS as usize)), - true - ); - packet.add_message( - channel_id.clone(), - SingleData::new(None, small_message.clone()), - ); + assert!(manager.can_add_bits(small_message.len() * (u8::BITS as usize)),); + packet.add_message(*channel_id, SingleData::new(None, small_message.clone())); assert_eq!(packet.num_messages(), 1); - assert_eq!( - 
manager.can_add_bits(small_message.len() * (u8::BITS as usize)), - true - ); - packet.add_message( - channel_id.clone(), - SingleData::new(None, small_message.clone()), - ); + assert!(manager.can_add_bits(small_message.len() * (u8::BITS as usize)),); + packet.add_message(*channel_id, SingleData::new(None, small_message.clone())); assert_eq!(packet.num_messages(), 2); Ok(()) } @@ -693,16 +678,10 @@ mod tests { let big_message = Bytes::from(vec![1u8; 2 * MTU_PAYLOAD_BYTES]); let mut packet = manager.build_new_single_packet(); - assert_eq!( - manager.can_add_channel_to_packet(channel_id, &mut packet)?, - true - ); + assert!(manager.can_add_channel_to_packet(channel_id, &mut packet)?,); // the big message is too big to fit in the packet - assert_eq!( - manager.can_add_bits(big_message.len() * (u8::BITS as usize)), - false - ); + assert!(!manager.can_add_bits(big_message.len() * (u8::BITS as usize)),); Ok(()) } @@ -793,17 +772,11 @@ mod tests { // only 1 bit can be written manager.try_write_buffer.set_reserved_bits(1); // cannot write channel because of the continuation bit - assert_eq!( - manager.can_add_channel_to_packet(channel_id, &mut packet)?, - false - ); + assert!(!manager.can_add_channel_to_packet(channel_id, &mut packet)?,); manager.clear_try_write_buffer(); manager.try_write_buffer.set_reserved_bits(2); - assert_eq!( - manager.can_add_channel_to_packet(channel_id, &mut packet)?, - true - ); + assert!(manager.can_add_channel_to_packet(channel_id, &mut packet)?,); Ok(()) } diff --git a/shared/src/protocol/channel.rs b/shared/src/protocol/channel.rs index 07baa2b71..8d3d60564 100644 --- a/shared/src/protocol/channel.rs +++ b/shared/src/protocol/channel.rs @@ -115,7 +115,7 @@ mod tests { registry.add::(settings.clone()); assert_eq!(registry.len(), 1); - let mut builder = registry.get_builder_from_net_id(0).unwrap(); + let builder = registry.get_builder_from_net_id(0).unwrap(); let channel_container: ChannelContainer = builder.build(); assert_eq!( 
channel_container.setting.mode, diff --git a/shared/src/serialize/wordbuffer/reader.rs b/shared/src/serialize/wordbuffer/reader.rs index 364c7772b..4db1f00e6 100644 --- a/shared/src/serialize/wordbuffer/reader.rs +++ b/shared/src/serialize/wordbuffer/reader.rs @@ -64,7 +64,7 @@ impl ReadBuffer for ReadWordBuffer { .0 .as_mut() .map_or_else(|| panic!("no reader"), |(reader, _)| reader); - Ok(T::decode(encoding, reader).context("error decoding")?) + T::decode(encoding, reader).context("error decoding") }) } diff --git a/shared/src/serialize/wordbuffer/writer.rs b/shared/src/serialize/wordbuffer/writer.rs index d92eb8f91..a44d0892b 100644 --- a/shared/src/serialize/wordbuffer/writer.rs +++ b/shared/src/serialize/wordbuffer/writer.rs @@ -130,13 +130,13 @@ mod tests { let mut read_buffer = ReadWordBuffer::start_read(bytes); let bool = read_buffer.deserialize::()?; - assert_eq!(bool, true); + assert!(bool); let bool = read_buffer.deserialize::()?; - assert_eq!(bool, false); + assert!(!bool); let bool = read_buffer.deserialize::()?; - assert_eq!(bool, true); + assert!(bool); let bool = read_buffer.deserialize::()?; - assert_eq!(bool, true); + assert!(bool); read_buffer.finish_read()?; dbg!(bytes); diff --git a/shared/src/server/events.rs b/shared/src/server/events.rs index 84fc68493..30316585a 100644 --- a/shared/src/server/events.rs +++ b/shared/src/server/events.rs @@ -63,7 +63,7 @@ impl ServerEvents

{ pub fn iter_connections(&self) -> impl Iterator + '_ { self.events .iter() - .filter_map(|(client_id, events)| events.has_connection().then_some(client_id.clone())) + .filter_map(|(client_id, events)| events.has_connection().then_some(*client_id)) } pub fn has_connections(&self) -> bool { @@ -95,9 +95,9 @@ impl ServerEvents

{ // } pub fn iter_disconnections(&self) -> impl Iterator + '_ { - self.events.iter().filter_map(|(client_id, events)| { - events.has_disconnection().then_some(client_id.clone()) - }) + self.events + .iter() + .filter_map(|(client_id, events)| events.has_disconnection().then_some(*client_id)) } pub fn has_disconnections(&self) -> bool { @@ -140,8 +140,8 @@ impl IterMessageEvent for ServerEvents

{ { Box::new(self.events.iter_mut().flat_map(|(client_id, events)| { let messages = events.into_iter_messages::().map(|(message, _)| message); - let client_ids = std::iter::once(client_id.clone()).cycle(); - return messages.zip(client_ids); + let client_ids = std::iter::once(*client_id).cycle(); + messages.zip(client_ids) })) } @@ -156,8 +156,8 @@ impl IterEntitySpawnEvent for ServerEvents

{ fn into_iter_entity_spawn(&mut self) -> Box + '_> { Box::new(self.events.iter_mut().flat_map(|(client_id, events)| { let entities = events.into_iter_entity_spawn().map(|(entity, _)| entity); - let client_ids = std::iter::once(client_id.clone()).cycle(); - return entities.zip(client_ids); + let client_ids = std::iter::once(*client_id).cycle(); + entities.zip(client_ids) })) } diff --git a/shared/src/server/input.rs b/shared/src/server/input.rs index c2b57543e..917830c87 100644 --- a/shared/src/server/input.rs +++ b/shared/src/server/input.rs @@ -33,7 +33,7 @@ pub struct InputPlugin { impl Default for InputPlugin

{ fn default() -> Self { Self { - _marker: std::marker::PhantomData::default(), + _marker: std::marker::PhantomData, } } } diff --git a/shared/src/server/resource.rs b/shared/src/server/resource.rs index 3e2d40b58..bc6e046d1 100644 --- a/shared/src/server/resource.rs +++ b/shared/src/server/resource.rs @@ -1,3 +1,4 @@ +use std::collections::hash_map::Entry; use std::collections::HashMap; use std::net::SocketAddr; use std::time::Duration; @@ -277,7 +278,7 @@ impl Server

{ .context("Error updating netcode server")?; // update connections - for (_, connection) in &mut self.user_connections { + for connection in self.user_connections.values_mut() { connection .base .update(&self.time_manager, &self.tick_manager); @@ -286,7 +287,7 @@ impl Server

{ // handle connections for client_id in self.context.connections.try_iter() { // TODO: do we need a mutex around this? - if !self.user_connections.contains_key(&client_id) { + if let Entry::Vacant(e) = self.user_connections.entry(client_id) { #[cfg(feature = "metrics")] metrics::increment_gauge!("connected_clients", 1.0); @@ -295,7 +296,7 @@ impl Server

{ let mut connection = Connection::new(self.protocol.channel_registry(), &self.config.ping); connection.base.events.push_connection(); - self.user_connections.insert(client_id, connection); + e.insert(connection); } } diff --git a/shared/src/shared/events.rs b/shared/src/shared/events.rs index 5543febf3..704617fbb 100644 --- a/shared/src/shared/events.rs +++ b/shared/src/shared/events.rs @@ -125,7 +125,7 @@ impl ComponentUpdateEvent { Self { entity, context, - _marker: PhantomData::default(), + _marker: PhantomData, } } @@ -151,7 +151,7 @@ impl ComponentInsertEvent { Self { entity, context, - _marker: PhantomData::default(), + _marker: PhantomData, } } @@ -177,7 +177,7 @@ impl ComponentRemoveEvent { Self { entity, context, - _marker: PhantomData::default(), + _marker: PhantomData, } } diff --git a/shared/src/shared/log.rs b/shared/src/shared/log.rs index 6d6e1912a..9ee50ad5c 100644 --- a/shared/src/shared/log.rs +++ b/shared/src/shared/log.rs @@ -53,6 +53,7 @@ impl Default for LogPlugin { } #[derive(Clone)] +/// Configuration to setup logging/metrics pub struct LogConfig { /// Filters logs using the [`EnvFilter`] format pub filter: String, diff --git a/shared/src/shared/ping/manager.rs b/shared/src/shared/ping/manager.rs index 4c0771c42..0730a460f 100644 --- a/shared/src/shared/ping/manager.rs +++ b/shared/src/shared/ping/manager.rs @@ -123,9 +123,7 @@ impl PingManager { if self.ping_timer.elapsed() >= self.config.ping_interval { self.ping_timer.reset(); - let ping_id = self - .ping_store - .push_new(time_manager.current_time().clone()); + let ping_id = self.ping_store.push_new(time_manager.current_time()); return Some(Ping { id: ping_id }); } @@ -225,7 +223,7 @@ impl PingManager { } SyncMessage::Pong(pong) => { // process the pong - self.process_pong(&pong, time_manager); + self.process_pong(pong, time_manager); } } } @@ -260,12 +258,11 @@ mod tests { let mut ping_manager = PingManager::new(&config); let mut time_manager = TimeManager::new(Duration::default()); - 
assert!(!ping_manager.is_synced()); assert_eq!(ping_manager.maybe_prepare_ping(&time_manager), None); let delta = Duration::from_millis(100); time_manager.update(delta, Duration::default()); - ping_manager.update(&mut time_manager); + ping_manager.update(&time_manager); // send pings assert_eq!( @@ -274,12 +271,12 @@ mod tests { ); let delta = Duration::from_millis(60); time_manager.update(delta, Duration::default()); - ping_manager.update(&mut time_manager); + ping_manager.update(&time_manager); // ping timer hasn't gone off yet, send nothing assert_eq!(ping_manager.maybe_prepare_ping(&time_manager), None); time_manager.update(delta, Duration::default()); - ping_manager.update(&mut time_manager); + ping_manager.update(&time_manager); assert_eq!( ping_manager.maybe_prepare_ping(&time_manager), Some(Ping { id: PingId(1) }) @@ -287,7 +284,7 @@ mod tests { let delta = Duration::from_millis(100); time_manager.update(delta, Duration::default()); - ping_manager.update(&mut time_manager); + ping_manager.update(&time_manager); assert_eq!( ping_manager.maybe_prepare_ping(&time_manager), Some(Ping { id: PingId(2) }) diff --git a/shared/src/shared/ping/store.rs b/shared/src/shared/ping/store.rs index cacf49bc5..875660af7 100644 --- a/shared/src/shared/ping/store.rs +++ b/shared/src/shared/ping/store.rs @@ -16,6 +16,12 @@ pub struct PingStore { buffer: SequenceBuffer, } +impl Default for PingStore { + fn default() -> Self { + Self::new() + } +} + impl PingStore { pub fn new() -> Self { PingStore { diff --git a/shared/src/shared/replication/systems.rs b/shared/src/shared/replication/systems.rs index ac28b6813..15c758497 100644 --- a/shared/src/shared/replication/systems.rs +++ b/shared/src/shared/replication/systems.rs @@ -30,7 +30,7 @@ fn add_despawn_tracker( for (entity, replicate) in query.iter() { debug!("Adding DespawnTracker to entity: {:?}", entity); commands.entity(entity).insert(DespawnTracker); - replication.owned_entities.insert(entity, replicate.clone()); + 
replication.owned_entities.insert(entity, *replicate); } } @@ -75,7 +75,7 @@ fn send_entity_spawn>( // Replicate to already connected clients (replicate only new entities) query.iter().for_each(|(entity, replicate)| { if replicate.is_added() { - replication.owned_entities.insert(entity, replicate.clone()); + replication.owned_entities.insert(entity, *replicate); sender .entity_spawn(entity, vec![], replicate.deref()) .unwrap(); diff --git a/shared/src/shared/time_manager.rs b/shared/src/shared/time_manager.rs index 9ab9e3e10..866f7aa3b 100644 --- a/shared/src/shared/time_manager.rs +++ b/shared/src/shared/time_manager.rs @@ -81,9 +81,9 @@ impl TimeManager { self.wrapped_time += delta; // set the overstep to the overstep of fixed_time self.overstep = overstep; - self.send_timer.as_mut().map(|timer| { + if let Some(timer) = self.send_timer.as_mut() { timer.tick(delta); - }); + } } /// Current time since start, wrapped around 1 hour diff --git a/shared/src/utils/ready_buffer.rs b/shared/src/utils/ready_buffer.rs index 530cd31bc..e13231f0d 100644 --- a/shared/src/utils/ready_buffer.rs +++ b/shared/src/utils/ready_buffer.rs @@ -62,16 +62,11 @@ impl ReadyBuffer { return None; } let mut val = None; - loop { - if let Some(item_with_key) = self.heap.peek() { - // we have a new update that is older than what we want, stop - if item_with_key.key > *key { - // put back the update in the heap - // self.heap.push(item_with_key); - break; - } - } else { - // heap is empty + while let Some(item_with_key) = self.heap.peek() { + // we have a new update that is older than what we want, stop + if item_with_key.key > *key { + // put back the update in the heap + // self.heap.push(item_with_key); break; } // safety: we know that the heap is not empty and that the key is <= the provided key @@ -141,7 +136,7 @@ mod tests { heap.add_item(now + Duration::from_secs(3), 3); // no items are visible - assert_eq!(heap.has_item(&Instant::now()), false); + assert!(!heap.has_item(&Instant::now())); 
// we move the clock to 2, 2 items should be visible, in order of insertion MockClock::advance(Duration::from_secs(2)); diff --git a/shared/src/utils/sequence_buffer.rs b/shared/src/utils/sequence_buffer.rs index 602b1c180..0dabe23c9 100644 --- a/shared/src/utils/sequence_buffer.rs +++ b/shared/src/utils/sequence_buffer.rs @@ -15,6 +15,12 @@ pub struct SequenceBuffer { _marker: PhantomData, } +impl Default for SequenceBuffer { + fn default() -> Self { + Self::new() + } +} + impl SequenceBuffer { pub fn new() -> Self { Self {