zerocopy/lib.rs
1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13// cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//! certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//! instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//! arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//! sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//! qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//! except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//! one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//! mutable reference of one type to a mutable reference of another type of
72//! the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//! mutable or immutable reference of one type to an immutable reference of
75//! another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
79//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//! the `alloc` crate is added as a dependency, and some allocation-related
95//! functionality is added.
96//!
97//! - **`std`**
98//! By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
99//! `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//! support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//! Provides derives for the core marker traits via the `zerocopy-derive`
104//! crate. These derives are re-exported from `zerocopy`, so it is not
105//! necessary to depend on `zerocopy-derive` directly.
106//!
107//! However, you may experience better compile times if you instead directly
108//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//! since doing so will allow Rust to compile these crates in parallel. To do
110//! so, do *not* enable the `derive` feature, and list both dependencies in
111//! your `Cargo.toml` with the same leading non-zero version number; e.g:
112//!
113//! ```toml
114//! [dependencies]
115//! zerocopy = "0.X"
116//! zerocopy-derive = "0.X"
117//! ```
118//!
119//! To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//! one of your dependencies enables zerocopy's `derive` feature, import
121//! derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//! zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//! When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//! `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//! target platform. Note that the layout of SIMD types is not yet stabilized,
128//! so these impls may be removed in the future if layout changes make them
129//! invalid. For more information, see the Unsafe Code Guidelines Reference
130//! page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//! Enables the `simd` feature and adds support for SIMD types which are only
134//! available on nightly. Since these types are unstable, support for any type
135//! may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//! Adds support for the unstable `f16` and `f128` types. These types are
139//! not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Build Tuning
145//!
146//! ## `--cfg zerocopy_inline_always`
147//!
148//! Upgrades `#[inline]` to `#[inline(always)]` on many of zerocopy's public
149//! functions and methods. This provides a narrowly-scoped alternative that
150//! *may* improve the optimization of hot paths using zerocopy without the broad
151//! compile-time penalties of configuring `codegen-units=1`.
152//!
153//! # Security Ethos
154//!
155//! Zerocopy is expressly designed for use in security-critical contexts. We
156//! strive to ensure that zerocopy code is sound under Rust's current
157//! memory model, and *any future memory model*. We ensure this by:
158//! - **...not 'guessing' about Rust's semantics.**
159//! We annotate `unsafe` code with a precise rationale for its soundness that
160//! cites a relevant section of Rust's official documentation. When Rust's
161//! documented semantics are unclear, we work with the Rust Operational
162//! Semantics Team to clarify Rust's documentation.
163//! - **...rigorously testing our implementation.**
164//! We run tests using [Miri], ensuring that zerocopy is sound across a wide
165//! array of supported target platforms of varying endianness and pointer
166//! width, and across both current and experimental memory models of Rust.
167//! - **...formally proving the correctness of our implementation.**
168//! We apply formal verification tools like [Kani][kani] to prove zerocopy's
169//! correctness.
170//!
171//! For more information, see our full [soundness policy].
172//!
173//! [Miri]: https://github.com/rust-lang/miri
174//! [Kani]: https://github.com/model-checking/kani
175//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
176//!
177//! # Relationship to Project Safe Transmute
178//!
179//! [Project Safe Transmute] is an official initiative of the Rust Project to
180//! develop language-level support for safer transmutation. The Project consults
181//! with crates like zerocopy to identify aspects of safer transmutation that
182//! would benefit from compiler support, and has developed an [experimental,
183//! compiler-supported analysis][mcp-transmutability] which determines whether,
184//! for a given type, any value of that type may be soundly transmuted into
185//! another type. Once this functionality is sufficiently mature, zerocopy
186//! intends to replace its internal transmutability analysis (implemented by our
187//! custom derives) with the compiler-supported one. This change will likely be
188//! an implementation detail that is invisible to zerocopy's users.
189//!
190//! Project Safe Transmute will not replace the need for most of zerocopy's
191//! higher-level abstractions. The experimental compiler analysis is a tool for
192//! checking the soundness of `unsafe` code, not a tool to avoid writing
193//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
194//! will still be required in order to provide higher-level abstractions on top
195//! of the building block provided by Project Safe Transmute.
196//!
197//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
198//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
199//!
200//! # MSRV
201//!
202//! See our [MSRV policy].
203//!
204//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
205//!
206//! # Changelog
207//!
208//! Zerocopy uses [GitHub Releases].
209//!
210//! [GitHub Releases]: https://github.com/google/zerocopy/releases
211//!
212//! # Thanks
213//!
214//! Zerocopy is maintained by engineers at Google with help from [many wonderful
215//! contributors][contributors]. Thank you to everyone who has lent a hand in
216//! making Rust a little more secure!
217//!
218//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
219
220// Sometimes we want to use lints which were added after our MSRV.
221// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
222// this attribute, any unknown lint would cause a CI failure when testing with
223// our MSRV.
224#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
225#![deny(renamed_and_removed_lints)]
226#![deny(
227 anonymous_parameters,
228 deprecated_in_future,
229 late_bound_lifetime_arguments,
230 missing_copy_implementations,
231 missing_debug_implementations,
232 missing_docs,
233 path_statements,
234 patterns_in_fns_without_body,
235 rust_2018_idioms,
236 trivial_numeric_casts,
237 unreachable_pub,
238 unsafe_op_in_unsafe_fn,
239 unused_extern_crates,
240 // We intentionally choose not to deny `unused_qualifications`. When items
241 // are added to the prelude (e.g., `core::mem::size_of`), this has the
242 // consequence of making some uses trigger this lint on the latest toolchain
243 // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
244 // does not work on older toolchains.
245 //
246 // We tested a more complicated fix in #1413, but ultimately decided that,
247 // since this lint is just a minor style lint, the complexity isn't worth it
248 // - it's fine to occasionally have unused qualifications slip through,
249 // especially since these do not affect our user-facing API in any way.
250 variant_size_differences
251)]
252#![cfg_attr(
253 __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
254 deny(fuzzy_provenance_casts, lossy_provenance_casts)
255)]
256#![deny(
257 clippy::all,
258 clippy::alloc_instead_of_core,
259 clippy::arithmetic_side_effects,
260 clippy::as_underscore,
261 clippy::assertions_on_result_states,
262 clippy::as_conversions,
263 clippy::correctness,
264 clippy::dbg_macro,
265 clippy::decimal_literal_representation,
266 clippy::double_must_use,
267 clippy::get_unwrap,
268 clippy::indexing_slicing,
269 clippy::missing_inline_in_public_items,
270 clippy::missing_safety_doc,
271 clippy::multiple_unsafe_ops_per_block,
272 clippy::must_use_candidate,
273 clippy::must_use_unit,
274 clippy::obfuscated_if_else,
275 clippy::perf,
276 clippy::print_stdout,
277 clippy::return_self_not_must_use,
278 clippy::std_instead_of_core,
279 clippy::style,
280 clippy::suspicious,
281 clippy::todo,
282 clippy::undocumented_unsafe_blocks,
283 clippy::unimplemented,
284 clippy::unnested_or_patterns,
285 clippy::unwrap_used,
286 clippy::use_debug
287)]
288// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
289// has false positives, and we test on our MSRV in CI, so it doesn't help us
290// anyway.
291#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
292#![deny(
293 rustdoc::bare_urls,
294 rustdoc::broken_intra_doc_links,
295 rustdoc::invalid_codeblock_attributes,
296 rustdoc::invalid_html_tags,
297 rustdoc::invalid_rust_codeblocks,
298 rustdoc::missing_crate_level_docs,
299 rustdoc::private_intra_doc_links
300)]
301// In test code, it makes sense to weight more heavily towards concise, readable
302// code over correct or debuggable code.
303#![cfg_attr(any(test, kani), allow(
304 // In tests, you get line numbers and have access to source code, so panic
305 // messages are less important. You also often unwrap a lot, which would
306 // make expect'ing instead very verbose.
307 clippy::unwrap_used,
308 // In tests, there's no harm to "panic risks" - the worst that can happen is
309 // that your test will fail, and you'll fix it. By contrast, panic risks in
310 // production code introduce the possibility of code panicking unexpectedly "in
311 // the field".
312 clippy::arithmetic_side_effects,
313 clippy::indexing_slicing,
314))]
315#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
316#![cfg_attr(
317 all(feature = "simd-nightly", target_arch = "arm"),
318 feature(stdarch_arm_neon_intrinsics)
319)]
320#![cfg_attr(
321 all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
322 feature(stdarch_powerpc)
323)]
324#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
325#![cfg_attr(doc_cfg, feature(doc_cfg))]
326#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
327#![cfg_attr(
328 any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
329 feature(layout_for_ptr)
330)]
331#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
332
333// This is a hack to allow zerocopy-derive derives to work in this crate. They
334// assume that zerocopy is linked as an extern crate, so they access items from
335// it as `zerocopy::Xxx`. This makes that still work.
336#[cfg(any(feature = "derive", test))]
337extern crate self as zerocopy;
338
339#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
340extern crate test;
341
342#[doc(hidden)]
343#[macro_use]
344pub mod util;
345
346pub mod byte_slice;
347pub mod byteorder;
348mod deprecated;
349
350#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)]
351pub mod doctests;
352
353// This module is `pub` so that zerocopy's error types and error handling
354// documentation is grouped together in a cohesive module. In practice, we
355// expect most users to use the re-export of `error`'s items to avoid identifier
356// stuttering.
357pub mod error;
358mod impls;
359#[doc(hidden)]
360pub mod layout;
361mod macros;
362#[doc(hidden)]
363pub mod pointer;
364mod r#ref;
365mod split_at;
366// FIXME(#252): If we make this pub, come up with a better name.
367mod wrappers;
368
369use core::{
370 cell::{Cell, UnsafeCell},
371 cmp::Ordering,
372 fmt::{self, Debug, Display, Formatter},
373 hash::Hasher,
374 marker::PhantomData,
375 mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
376 num::{
377 NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
378 NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
379 },
380 ops::{Deref, DerefMut},
381 ptr::{self, NonNull},
382 slice,
383};
384#[cfg(feature = "std")]
385use std::io;
386
387#[doc(hidden)]
388pub use crate::pointer::invariant::{self, BecauseExclusive};
389#[doc(hidden)]
390pub use crate::pointer::PtrInner;
391pub use crate::{
392 byte_slice::*,
393 byteorder::*,
394 error::*,
395 r#ref::*,
396 split_at::{Split, SplitAt},
397 wrappers::*,
398};
399
400#[cfg(any(feature = "alloc", test, kani))]
401extern crate alloc;
402#[cfg(any(feature = "alloc", test))]
403use alloc::{boxed::Box, vec::Vec};
404#[cfg(any(feature = "alloc", test))]
405use core::alloc::Layout;
406
407use util::MetadataOf;
408
409// Used by `KnownLayout`.
410#[doc(hidden)]
411pub use crate::layout::*;
412// Used by `TryFromBytes::is_bit_valid`.
413#[doc(hidden)]
414pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
415// For each trait polyfill, as soon as the corresponding feature is stable, the
416// polyfill import will be unused because method/function resolution will prefer
417// the inherent method/function over a trait method/function. Thus, we suppress
418// the `unused_imports` warning.
419//
420// See the documentation on `util::polyfills` for more information.
421#[allow(unused_imports)]
422use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
423
424#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)))]
425const _: () = {
    // Warn developers who build zerocopy's test suite with a bare `cargo
    // test` instead of the supported `cargo.sh`/`win-cargo.bat` wrappers
    // (which set `__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE`). Referencing the
    // `#[deprecated]` const below triggers the `deprecated` lint, surfacing
    // the message at compile time.
426 #[deprecated = "Development of zerocopy using cargo is not supported. Please use `cargo.sh` or `win-cargo.bat` instead."]
427 #[allow(unused)]
428 const WARNING: () = ();
    // Locally re-enable the `deprecated` lint so the reference below always
    // produces the warning, even if the lint is allowed at an outer scope.
429 #[warn(deprecated)]
430 WARNING
431};
432
433/// Implements [`KnownLayout`].
434///
435/// This derive analyzes various aspects of a type's layout that are needed for
436/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
437/// e.g.:
438///
439/// ```
440/// # use zerocopy_derive::KnownLayout;
441/// #[derive(KnownLayout)]
442/// struct MyStruct {
443/// # /*
444/// ...
445/// # */
446/// }
447///
448/// #[derive(KnownLayout)]
449/// enum MyEnum {
450/// # V00,
451/// # /*
452/// ...
453/// # */
454/// }
455///
456/// #[derive(KnownLayout)]
457/// union MyUnion {
458/// # variant: u8,
459/// # /*
460/// ...
461/// # */
462/// }
463/// ```
464///
465/// # Limitations
466///
467/// This derive cannot currently be applied to unsized structs without an
468/// explicit `repr` attribute.
469///
470/// Some invocations of this derive run afoul of a [known bug] in Rust's type
471/// privacy checker. For example, this code:
472///
473/// ```compile_fail,E0446
474/// use zerocopy::*;
475/// # use zerocopy_derive::*;
476///
477/// #[derive(KnownLayout)]
478/// #[repr(C)]
479/// pub struct PublicType {
480/// leading: Foo,
481/// trailing: Bar,
482/// }
483///
484/// #[derive(KnownLayout)]
485/// struct Foo;
486///
487/// #[derive(KnownLayout)]
488/// struct Bar;
489/// ```
490///
491/// ...results in a compilation error:
492///
493/// ```text
494/// error[E0446]: private type `Bar` in public interface
495/// --> examples/bug.rs:3:10
496/// |
497/// 3 | #[derive(KnownLayout)]
498/// | ^^^^^^^^^^^ can't leak private type
499/// ...
500/// 14 | struct Bar;
501/// | ---------- `Bar` declared as private
502/// |
503/// = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
504/// ```
505///
506/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
507/// structs whose trailing field type is less public than the enclosing struct.
508///
509/// To work around this, mark the trailing field type `pub` and annotate it with
510/// `#[doc(hidden)]`; e.g.:
511///
512/// ```no_run
513/// use zerocopy::*;
514/// # use zerocopy_derive::*;
515///
516/// #[derive(KnownLayout)]
517/// #[repr(C)]
518/// pub struct PublicType {
519/// leading: Foo,
520/// trailing: Bar,
521/// }
522///
523/// #[derive(KnownLayout)]
524/// struct Foo;
525///
526/// #[doc(hidden)]
527/// #[derive(KnownLayout)]
528/// pub struct Bar; // <- `Bar` is now also `pub`
529/// ```
530///
531/// [known bug]: https://github.com/rust-lang/rust/issues/45713
532#[cfg(any(feature = "derive", test))]
533#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
534pub use zerocopy_derive::KnownLayout;
535// These exist so that code which was written against the old names will get
536// less confusing error messages when they upgrade to a more recent version of
537// zerocopy. On our MSRV toolchain, the error messages read, for example:
538//
539// error[E0603]: trait `FromZeroes` is private
540// --> examples/deprecated.rs:1:15
541// |
542// 1 | use zerocopy::FromZeroes;
543// | ^^^^^^^^^^ private trait
544// |
545// note: the trait `FromZeroes` is defined here
546// --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
547// |
548// 1845 | use FromZeros as FromZeroes;
549// | ^^^^^^^^^^^^^^^^^^^^^^^
550//
551// The "note" provides enough context to make it easy to figure out how to fix
552// the error.
553#[allow(unused)]
554use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
555
556/// Indicates that zerocopy can reason about certain aspects of a type's layout.
557///
558/// This trait is required by many of zerocopy's APIs. It supports sized types,
559/// slices, and [slice DSTs](#dynamically-sized-types).
560///
561/// # Implementation
562///
563/// **Do not implement this trait yourself!** Instead, use
564/// [`#[derive(KnownLayout)]`][derive]; e.g.:
565///
566/// ```
567/// # use zerocopy_derive::KnownLayout;
568/// #[derive(KnownLayout)]
569/// struct MyStruct {
570/// # /*
571/// ...
572/// # */
573/// }
574///
575/// #[derive(KnownLayout)]
576/// enum MyEnum {
577/// # /*
578/// ...
579/// # */
580/// }
581///
582/// #[derive(KnownLayout)]
583/// union MyUnion {
584/// # variant: u8,
585/// # /*
586/// ...
587/// # */
588/// }
589/// ```
590///
591/// This derive performs a sophisticated analysis to deduce the layout
592/// characteristics of types. You **must** implement this trait via the derive.
593///
594/// # Dynamically-sized types
595///
596/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
597///
598/// A slice DST is a type whose trailing field is either a slice or another
599/// slice DST, rather than a type with fixed size. For example:
600///
601/// ```
602/// #[repr(C)]
603/// struct PacketHeader {
604/// # /*
605/// ...
606/// # */
607/// }
608///
609/// #[repr(C)]
610/// struct Packet {
611/// header: PacketHeader,
612/// body: [u8],
613/// }
614/// ```
615///
616/// It can be useful to think of slice DSTs as a generalization of slices - in
617/// other words, a normal slice is just the special case of a slice DST with
618/// zero leading fields. In particular:
619/// - Like slices, slice DSTs can have different lengths at runtime
620/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
621/// or via other indirection such as `Box`
622/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
623/// encodes the number of elements in the trailing slice field
624///
625/// ## Slice DST layout
626///
627/// Just like other composite Rust types, the layout of a slice DST is not
628/// well-defined unless it is specified using an explicit `#[repr(...)]`
629/// attribute such as `#[repr(C)]`. [Other representations are
630/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
631/// example.
632///
633/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
634/// types][repr-c-structs], but the presence of a variable-length field
635/// introduces the possibility of *dynamic padding*. In particular, it may be
636/// necessary to add trailing padding *after* the trailing slice field in order
637/// to satisfy the outer type's alignment, and the amount of padding required
638/// may be a function of the length of the trailing slice field. This is just a
639/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
640/// but it can result in surprising behavior. For example, consider the
641/// following type:
642///
643/// ```
644/// #[repr(C)]
645/// struct Foo {
646/// a: u32,
647/// b: u8,
648/// z: [u16],
649/// }
650/// ```
651///
652/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
653/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
654/// `Foo`:
655///
656/// ```text
657/// byte offset | 01234567
658/// field | aaaab---
659/// ><
660/// ```
661///
662/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
663/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
664/// round up to offset 6. This means that there is one byte of padding between
665/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
666/// then two bytes of padding after `z` in order to satisfy the overall
667/// alignment of `Foo`. The size of this instance is 8 bytes.
668///
669/// What about if `z` has length 1?
670///
671/// ```text
672/// byte offset | 01234567
673/// field | aaaab-zz
674/// ```
675///
676/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
677/// that we no longer need padding after `z` in order to satisfy `Foo`'s
678/// alignment. We've now seen two different values of `Foo` with two different
679/// lengths of `z`, but they both have the same size - 8 bytes.
680///
681/// What about if `z` has length 2?
682///
683/// ```text
684/// byte offset | 012345678901
685/// field | aaaab-zzzz--
686/// ```
687///
688/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
689/// size to 10, and so we now need another 2 bytes of padding after `z` to
690/// satisfy `Foo`'s alignment.
691///
692/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
693/// applied to slice DSTs, but it can be surprising that the amount of trailing
694/// padding becomes a function of the trailing slice field's length, and thus
695/// can only be computed at runtime.
696///
697/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
698/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
699///
700/// ## What is a valid size?
701///
702/// There are two places in zerocopy's API that we refer to "a valid size" of a
703/// type. In normal casts or conversions, where the source is a byte slice, we
704/// need to know whether the source byte slice is a valid size of the
705/// destination type. In prefix or suffix casts, we need to know whether *there
706/// exists* a valid size of the destination type which fits in the source byte
707/// slice and, if so, what the largest such size is.
708///
709/// As outlined above, a slice DST's size is defined by the number of elements
710/// in its trailing slice field. However, there is not necessarily a 1-to-1
711/// mapping between trailing slice field length and overall size. As we saw in
712/// the previous section with the type `Foo`, instances with both 0 and 1
713/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
714///
715/// When we say "x is a valid size of `T`", we mean one of two things:
716/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
717/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
718/// `T` with `len` trailing slice elements has size `x`
719///
720/// When we say "largest possible size of `T` that fits in a byte slice", we
721/// mean one of two things:
722/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
723/// `size_of::<T>()` bytes long
724/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
725/// that the instance of `T` with `len` trailing slice elements fits in the
726/// byte slice, and to choose the largest such `len`, if any
727///
728///
729/// # Safety
730///
731/// This trait does not convey any safety guarantees to code outside this crate.
732///
733/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
734/// releases of zerocopy may make backwards-breaking changes to these items,
735/// including changes that only affect soundness, which may cause code which
736/// uses those items to silently become unsound.
737///
738#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
739#[cfg_attr(
740 not(feature = "derive"),
741 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
742)]
743#[cfg_attr(
744 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
745 diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
746)]
747pub unsafe trait KnownLayout {
748 // The `Self: Sized` bound makes it so that `KnownLayout` can still be
749 // object safe. It's not currently object safe thanks to `const LAYOUT`, and
750 // it likely won't be in the future, but there's no reason not to be
751 // forwards-compatible with object safety.
752 #[doc(hidden)]
753 fn only_derive_is_allowed_to_implement_this_trait()
754 where
755 Self: Sized;
756
757 /// The type of metadata stored in a pointer to `Self`.
758 ///
759 /// This is `()` for sized types and [`usize`] for slice DSTs.
760 type PointerMetadata: PointerMetadata;
761
762 /// A maybe-uninitialized analog of `Self`
763 ///
764 /// # Safety
765 ///
766 /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
767 /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
768 #[doc(hidden)]
769 type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
770
771 /// The layout of `Self`.
772 ///
773 /// # Safety
774 ///
775 /// Callers may assume that `LAYOUT` accurately reflects the layout of
776 /// `Self`. In particular:
777 /// - `LAYOUT.align` is equal to `Self`'s alignment
778 /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
779 /// where `size == size_of::<Self>()`
780 /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
781 /// SizeInfo::SliceDst(slice_layout)` where:
782 /// - The size, `size`, of an instance of `Self` with `elems` trailing
783 /// slice elements is equal to `slice_layout.offset +
784 /// slice_layout.elem_size * elems` rounded up to the nearest multiple
785 /// of `LAYOUT.align`
786 /// - For such an instance, any bytes in the range `[slice_layout.offset +
787 /// slice_layout.elem_size * elems, size)` are padding and must not be
788 /// assumed to be initialized
789 #[doc(hidden)]
790 const LAYOUT: DstLayout;
791
792 /// SAFETY: The returned pointer has the same address and provenance as
793 /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
794 /// elements in its trailing slice.
795 #[doc(hidden)]
796 fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
797
798 /// Extracts the metadata from a pointer to `Self`.
799 ///
800 /// # Safety
801 ///
802 /// `pointer_to_metadata` always returns the correct metadata stored in
803 /// `ptr`.
804 #[doc(hidden)]
805 fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
806
    /// Computes the length of the byte range addressed by `ptr`.
    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
    ///
    /// # Safety
    ///
    /// Callers may assume that `size_of_val_raw` always returns the correct
    /// size.
    ///
    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
        let meta = Self::pointer_to_metadata(ptr.as_ptr());
        // SAFETY: `size_for_metadata` promises to only return `None` if the
        // resulting size would not fit in a `usize`.
        Self::size_for_metadata(meta)
    }
827
    /// Constructs a dangling `NonNull<Self>`.
    ///
    /// If `Self` is a slice DST, the returned pointer's metadata is zero
    /// trailing slice elements.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn raw_dangling() -> NonNull<Self> {
        let meta = Self::PointerMetadata::from_elem_count(0);
        Self::raw_from_ptr_len(NonNull::dangling(), meta)
    }
835
    /// Computes the size of an object of type `Self` with the given pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to return `None` if and only if the
    /// resulting size would not fit in a [`usize`]. Note that the returned size
    /// could exceed the actual maximum valid size of an allocated object,
    /// [`isize::MAX`].
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::KnownLayout;
    ///
    /// assert_eq!(u8::size_for_metadata(()), Some(1));
    /// assert_eq!(u16::size_for_metadata(()), Some(2));
    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
    ///
    /// // This size exceeds the maximum valid object size (`isize::MAX`):
    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
    ///
    /// // This size, if computed, would exceed `usize::MAX`:
    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
    /// ```
    #[inline(always)]
    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
        // Delegates to `PointerMetadata::size_for_metadata`, which promises to
        // only return `None` if the size would not fit in a `usize`.
        meta.size_for_metadata(Self::LAYOUT)
    }
866}
867
/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
#[inline(always)]
pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
where
    T: ?Sized + KnownLayout<PointerMetadata = usize>,
{
    // This local helper trait hoists the extraction of the trailing-slice
    // layout into an associated `const`, so that the `match` below is
    // evaluated at compile time rather than at runtime.
    trait LayoutFacts {
        const SIZE_INFO: TrailingSliceLayout;
    }

    impl<T: ?Sized> LayoutFacts for T
    where
        T: KnownLayout<PointerMetadata = usize>,
    {
        // `PointerMetadata = usize` indicates a slice DST (see
        // `KnownLayout::PointerMetadata`), so the `Sized` arm should be
        // unreachable; `const_panic!` surfaces any mismatch at compile time.
        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
            crate::SizeInfo::SliceDst(info) => info,
        };
    }

    T::SIZE_INFO
}
890
/// The metadata associated with a [`KnownLayout`] type.
///
/// Implemented for `()` (sized types) and `usize` (slice DSTs); no other
/// implementations currently exist.
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Converts `self` to an element count.
    ///
    /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns
    /// `self`. No other types are currently supported.
    fn to_elem_count(self) -> usize;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting size
    /// would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
921
922impl PointerMetadata for () {
923 #[inline]
924 #[allow(clippy::unused_unit)]
925 fn from_elem_count(_elems: usize) -> () {}
926
927 #[inline]
928 fn to_elem_count(self) -> usize {
929 0
930 }
931
932 #[inline]
933 fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
934 match layout.size_info {
935 SizeInfo::Sized { size } => Some(size),
936 // NOTE: This branch is unreachable, but we return `None` rather
937 // than `unreachable!()` to avoid generating panic paths.
938 SizeInfo::SliceDst(_) => None,
939 }
940 }
941}
942
943impl PointerMetadata for usize {
944 #[inline]
945 fn from_elem_count(elems: usize) -> usize {
946 elems
947 }
948
949 #[inline]
950 fn to_elem_count(self) -> usize {
951 self
952 }
953
954 #[inline]
955 fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
956 match layout.size_info {
957 SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
958 let slice_len = elem_size.checked_mul(self)?;
959 let without_padding = offset.checked_add(slice_len)?;
960 without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
961 }
962 // NOTE: This branch is unreachable, but we return `None` rather
963 // than `unreachable!()` to avoid generating panic paths.
964 SizeInfo::Sized { .. } => None,
965 }
966 }
967}
968
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    // A slice DST's pointer metadata is its trailing-slice element count.
    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        // Cast to a slice of zero-sized elements so the length can be read
        // without asserting anything about `T`'s alignment or bit validity.
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - By invariant, `self.ptr` has valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1051
// `KnownLayout` impls for sized primitive types.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// `f16` and `f128` are unstable, so these impls are gated on the
// `float-nightly` feature.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// `KnownLayout` impls for generic wrapper, pointer, and reference types.
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);
1080
1081// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
1082// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
1083//
1084// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
1085//
1086// `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
1087// `T`
1088//
1089// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
1090//
1091// `UnsafeCell<T>` has the same in-memory representation as its inner type
1092// `T`.
1093//
1094// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
1095//
1096// `Cell<T>` has the same in-memory representation as `T`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    // SAFETY: Justified by the representation guarantees cited in the comment
    // preceding this `const` block.
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1107
1108// SAFETY:
1109// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
1110// `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
1111// - Fixed prefix size
1112// - Alignment
1113// - (For DSTs) trailing slice element size
// - By consequence of the above, referents `T::MaybeUninit` and `T` require
//   the same kind of pointer metadata, and thus it is valid to perform an
//   `as` cast from `*mut T` to `*mut T::MaybeUninit`, and this operation
1117// preserves referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    // SAFETY: Justified by the comment preceding this `const` block.
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1121
// FIXME(#196, #2856): Eventually, we'll want to support enum variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.
/// Sentinel `VARIANT_ID` used by [`HasField`] for struct fields.
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
/// Sentinel `VARIANT_ID` used by [`HasField`] for union fields.
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
/// Sentinel `VARIANT_ID` used by [`HasField`] for `repr(C)` union fields.
#[doc(hidden)]
pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1134
/// # Safety
///
/// `Self::ProjectToTag` must satisfy its safety invariant.
#[doc(hidden)]
pub unsafe trait HasTag {
    // Marker method: only the derive is expected to implement this trait. The
    // `Self: Sized` bound keeps the trait object safe.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type's enum tag, or `()` for non-enum types.
    type Tag: Immutable;

    /// A pointer projection from `Self` to its tag.
    ///
    /// # Safety
    ///
    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
}
1155
1156/// Projects a given field from `Self`.
1157///
1158/// All implementations of `HasField` for a particular field `f` in `Self`
1159/// should use the same `Field` type; this ensures that `Field` is inferable
1160/// given an explicit `VARIANT_ID` and `FIELD_ID`.
1161///
1162/// # Safety
1163///
1164/// A field `f` is `HasField` for `Self` if and only if:
1165///
1166/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
1167/// `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
1168/// `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
1169/// of the enum variant in which `f` appears. Note that `Self` does not need
1170/// to actually *be* such a type – it just needs to have the same layout as
1171/// such a type. For example, a `#[repr(transparent)]` wrapper around an enum
1172/// has the same layout as that enum.
1173/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
1174/// if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
1175/// - `Field` is a type with the same visibility as `f`.
1176/// - `Type` has the same type as `f`.
1177///
1178/// The caller must **not** assume that a pointer's referent being aligned
1179/// implies that calling `project` on that pointer will result in a pointer to
1180/// an aligned referent. For example, `HasField` may be implemented for
1181/// `#[repr(packed)]` structs.
1182///
1183/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasTag
{
    // Marker method: only the derive is expected to implement this trait.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// The returned pointer refers to a non-strict subset of the bytes of
    /// `slf`'s referent, and has the same provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1207
1208/// Projects a given field from `Self`.
1209///
1210/// Implementations of this trait encode the conditions under which a field can
1211/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
1212/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
1213/// other words, it is a type-level function over invariants; `I` goes in,
1214/// `Self::Invariants` comes out.
1215///
1216/// # Safety
1217///
1218/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
1219/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
1220/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
1221/// conforms to `T::Invariants`.
#[doc(hidden)]
pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasField<Field, VARIANT_ID, FIELD_ID>
where
    I: invariant::Invariants,
{
    // Marker method: only the derive is expected to implement this trait.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The invariants of the projected field pointer, with respect to the
    /// invariants, `I`, of the containing pointer. The aliasing dimension of
    /// the invariants is guaranteed to remain unchanged.
    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;

    /// The failure mode of projection. `()` if the projection is fallible,
    /// otherwise [`core::convert::Infallible`].
    type Error;

    /// Is the given field projectable from `ptr`?
    ///
    /// If a field with [`Self::Invariants`] is projectable from the referent,
    /// this function returns `Ok(())`, permitting the projection; otherwise
    /// `Err`.
    ///
    /// This method must be overridden if the field's projectability depends on
    /// the value of the bytes in `ptr`.
    #[inline(always)]
    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
        trait IsInfallible {
            const IS_INFALLIBLE: bool;
        }

        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
            PhantomData<(Field, I, T)>,
        )
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants;

        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants,
        {
            const IS_INFALLIBLE: bool = {
                let is_infallible = match VARIANT_ID {
                    // For nondestructive projections of struct and union
                    // fields, the projected field's satisfaction of
                    // `Invariants` does not depend on the value of the
                    // referent. This default implementation of `is_projectable`
                    // is non-destructive, as it does not overwrite any part of
                    // the referent.
                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
                    // NOTE(review): `crate::REPR_C_UNION_VARIANT_ID` falls
                    // into this arm as well — confirm that is intended.
                    _enum_variant => {
                        use crate::invariant::{Validity, ValidityKind};
                        match I::Validity::KIND {
                            // The `Uninit` and `Initialized` validity
                            // invariants do not depend on the enum's tag. In
                            // particular, we don't actually care about what
                            // variant is present – we can treat *any* range of
                            // uninitialized or initialized memory as containing
                            // an uninitialized or initialized instance of *any*
                            // type – the type itself is irrelevant.
                            ValidityKind::Uninit | ValidityKind::Initialized => true,
                            // The projectability of an enum field from an
                            // `AsInitialized` or `Valid` state is a dynamic
                            // property of its tag.
                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
                        }
                    }
                };
                const_assert!(is_infallible);
                is_infallible
            };
        }

        const_assert!(
            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
        );

        Ok(())
    }
}
1307
1308/// Analyzes whether a type is [`FromZeros`].
1309///
1310/// This derive analyzes, at compile time, whether the annotated type satisfies
1311/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1312/// supertraits if it is sound to do so. This derive can be applied to structs,
1313/// enums, and unions; e.g.:
1314///
1315/// ```
1316/// # use zerocopy_derive::{FromZeros, Immutable};
1317/// #[derive(FromZeros)]
1318/// struct MyStruct {
1319/// # /*
1320/// ...
1321/// # */
1322/// }
1323///
1324/// #[derive(FromZeros)]
1325/// #[repr(u8)]
1326/// enum MyEnum {
1327/// # Variant0,
1328/// # /*
1329/// ...
1330/// # */
1331/// }
1332///
1333/// #[derive(FromZeros, Immutable)]
1334/// union MyUnion {
1335/// # variant: u8,
1336/// # /*
1337/// ...
1338/// # */
1339/// }
1340/// ```
1341///
1342/// [safety conditions]: trait@FromZeros#safety
1343///
1344/// # Analysis
1345///
1346/// *This section describes, roughly, the analysis performed by this derive to
1347/// determine whether it is sound to implement `FromZeros` for a given type.
1348/// Unless you are modifying the implementation of this derive, or attempting to
1349/// manually implement `FromZeros` for a type yourself, you don't need to read
1350/// this section.*
1351///
1352/// If a type has the following properties, then this derive can implement
1353/// `FromZeros` for that type:
1354///
1355/// - If the type is a struct, all of its fields must be `FromZeros`.
1356/// - If the type is an enum:
1357/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1358/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
/// - It must have a variant with a discriminant/tag of `0`. See [the
///   reference] for a description of how discriminant values are specified.
1362/// - The fields of that variant must be `FromZeros`.
1363///
1364/// This analysis is subject to change. Unsafe code may *only* rely on the
1365/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1366/// implementation details of this derive.
1367///
1368/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1369///
1370/// ## Why isn't an explicit representation required for structs?
1371///
1372/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1373/// that structs are marked with `#[repr(C)]`.
1374///
1375/// Per the [Rust reference](reference),
1376///
1377/// > The representation of a type can change the padding between fields, but
1378/// > does not change the layout of the fields themselves.
1379///
1380/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1381///
1382/// Since the layout of structs only consists of padding bytes and field bytes,
1383/// a struct is soundly `FromZeros` if:
1384/// 1. its padding is soundly `FromZeros`, and
1385/// 2. its fields are soundly `FromZeros`.
1386///
1387/// The answer to the first question is always yes: padding bytes do not have
1388/// any validity constraints. A [discussion] of this question in the Unsafe Code
1389/// Guidelines Working Group concluded that it would be virtually unimaginable
1390/// for future versions of rustc to add validity constraints to padding bytes.
1391///
1392/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1393///
1394/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1395/// its fields are `FromZeros`.
1396// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1397// attribute.
1398#[cfg(any(feature = "derive", test))]
1399#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1400pub use zerocopy_derive::FromZeros;
1401/// Analyzes whether a type is [`Immutable`].
1402///
1403/// This derive analyzes, at compile time, whether the annotated type satisfies
1404/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1405/// sound to do so. This derive can be applied to structs, enums, and unions;
1406/// e.g.:
1407///
1408/// ```
1409/// # use zerocopy_derive::Immutable;
1410/// #[derive(Immutable)]
1411/// struct MyStruct {
1412/// # /*
1413/// ...
1414/// # */
1415/// }
1416///
1417/// #[derive(Immutable)]
1418/// enum MyEnum {
1419/// # Variant0,
1420/// # /*
1421/// ...
1422/// # */
1423/// }
1424///
1425/// #[derive(Immutable)]
1426/// union MyUnion {
1427/// # variant: u8,
1428/// # /*
1429/// ...
1430/// # */
1431/// }
1432/// ```
1433///
1434/// # Analysis
1435///
1436/// *This section describes, roughly, the analysis performed by this derive to
1437/// determine whether it is sound to implement `Immutable` for a given type.
1438/// Unless you are modifying the implementation of this derive, you don't need
1439/// to read this section.*
1440///
1441/// If a type has the following properties, then this derive can implement
1442/// `Immutable` for that type:
1443///
1444/// - All fields must be `Immutable`.
1445///
1446/// This analysis is subject to change. Unsafe code may *only* rely on the
1447/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1448/// implementation details of this derive.
1449///
1450/// [safety conditions]: trait@Immutable#safety
1451#[cfg(any(feature = "derive", test))]
1452#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1453pub use zerocopy_derive::Immutable;
1454
1455/// Types which are free from interior mutability.
1456///
1457/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1458/// by ownership or an exclusive (`&mut`) borrow.
1459///
1460/// # Implementation
1461///
1462/// **Do not implement this trait yourself!** Instead, use
1463/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1464/// e.g.:
1465///
1466/// ```
1467/// # use zerocopy_derive::Immutable;
1468/// #[derive(Immutable)]
1469/// struct MyStruct {
1470/// # /*
1471/// ...
1472/// # */
1473/// }
1474///
1475/// #[derive(Immutable)]
1476/// enum MyEnum {
1477/// # /*
1478/// ...
1479/// # */
1480/// }
1481///
1482/// #[derive(Immutable)]
1483/// union MyUnion {
1484/// # variant: u8,
1485/// # /*
1486/// ...
1487/// # */
1488/// }
1489/// ```
1490///
1491/// This derive performs a sophisticated, compile-time safety analysis to
1492/// determine whether a type is `Immutable`.
1493///
1494/// # Safety
1495///
1496/// Unsafe code outside of this crate must not make any assumptions about `T`
1497/// based on `T: Immutable`. We reserve the right to relax the requirements for
1498/// `Immutable` in the future, and if unsafe code outside of this crate makes
1499/// assumptions based on `T: Immutable`, future relaxations may cause that code
1500/// to become unsound.
1501///
1502// # Safety (Internal)
1503//
1504// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1505// `t: &T`, `t` does not permit interior mutation of its referent. Because
1506// [`UnsafeCell`] is the only type which permits interior mutation, it is
1507// sufficient (though not necessary) to guarantee that `T` contains no
1508// `UnsafeCell`s.
1509//
1510// [`UnsafeCell`]: core::cell::UnsafeCell
1511#[cfg_attr(
1512 feature = "derive",
1513 doc = "[derive]: zerocopy_derive::Immutable",
1514 doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1515)]
1516#[cfg_attr(
1517 not(feature = "derive"),
1518 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1519 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1520)]
1521#[cfg_attr(
1522 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1523 diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1524)]
pub unsafe trait Immutable {
    // Marker method: only the derive is expected to implement this trait.
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1533
1534/// Implements [`TryFromBytes`].
1535///
1536/// This derive synthesizes the runtime checks required to check whether a
1537/// sequence of initialized bytes corresponds to a valid instance of a type.
1538/// This derive can be applied to structs, enums, and unions; e.g.:
1539///
1540/// ```
1541/// # use zerocopy_derive::{TryFromBytes, Immutable};
1542/// #[derive(TryFromBytes)]
1543/// struct MyStruct {
1544/// # /*
1545/// ...
1546/// # */
1547/// }
1548///
1549/// #[derive(TryFromBytes)]
1550/// #[repr(u8)]
1551/// enum MyEnum {
1552/// # V00,
1553/// # /*
1554/// ...
1555/// # */
1556/// }
1557///
1558/// #[derive(TryFromBytes, Immutable)]
1559/// union MyUnion {
1560/// # variant: u8,
1561/// # /*
1562/// ...
1563/// # */
1564/// }
1565/// ```
1566///
1567/// # Portability
1568///
1569/// To ensure consistent endianness for enums with multi-byte representations,
1570/// explicitly specify and convert each discriminant using `.to_le()` or
1571/// `.to_be()`; e.g.:
1572///
1573/// ```
1574/// # use zerocopy_derive::TryFromBytes;
1575/// // `DataStoreVersion` is encoded in little-endian.
1576/// #[derive(TryFromBytes)]
1577/// #[repr(u32)]
1578/// pub enum DataStoreVersion {
1579/// /// Version 1 of the data store.
1580/// V1 = 9u32.to_le(),
1581///
1582/// /// Version 2 of the data store.
1583/// V2 = 10u32.to_le(),
1584/// }
1585/// ```
1586///
1587/// [safety conditions]: trait@TryFromBytes#safety
1588#[cfg(any(feature = "derive", test))]
1589#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1590pub use zerocopy_derive::TryFromBytes;
1591
1592/// Types for which some bit patterns are valid.
1593///
1594/// A memory region of the appropriate length which contains initialized bytes
1595/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1596/// bytes corresponds to a [*valid instance*] of that type. For example,
1597/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1598/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1599/// `1`.
1600///
1601/// # Implementation
1602///
1603/// **Do not implement this trait yourself!** Instead, use
1604/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1605///
1606/// ```
1607/// # use zerocopy_derive::{TryFromBytes, Immutable};
1608/// #[derive(TryFromBytes)]
1609/// struct MyStruct {
1610/// # /*
1611/// ...
1612/// # */
1613/// }
1614///
1615/// #[derive(TryFromBytes)]
1616/// #[repr(u8)]
1617/// enum MyEnum {
1618/// # V00,
1619/// # /*
1620/// ...
1621/// # */
1622/// }
1623///
1624/// #[derive(TryFromBytes, Immutable)]
1625/// union MyUnion {
1626/// # variant: u8,
1627/// # /*
1628/// ...
1629/// # */
1630/// }
1631/// ```
1632///
1633/// This derive ensures that the runtime check of whether bytes correspond to a
1634/// valid instance is sound. You **must** implement this trait via the derive.
1635///
1636/// # What is a "valid instance"?
1637///
1638/// In Rust, each type has *bit validity*, which refers to the set of bit
1639/// patterns which may appear in an instance of that type. It is impossible for
1640/// safe Rust code to produce values which violate bit validity (ie, values
1641/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1642/// invalid value, this is considered [undefined behavior].
1643///
1644/// Rust's bit validity rules are currently being decided, which means that some
1645/// types have three classes of bit patterns: those which are definitely valid,
1646/// and whose validity is documented in the language; those which may or may not
1647/// be considered valid at some point in the future; and those which are
1648/// definitely invalid.
1649///
1650/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1651/// be valid if its validity is a documented guarantee provided by the
1652/// language.
1653///
1654/// For most use cases, Rust's current guarantees align with programmers'
1655/// intuitions about what ought to be valid. As a result, zerocopy's
1656/// conservatism should not affect most users.
1657///
1658/// If you are negatively affected by lack of support for a particular type,
1659/// we encourage you to let us know by [filing an issue][github-repo].
1660///
1661/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1662///
1663/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1664/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1665/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1666/// IntoBytes`, there exist values of `t: T` such that
1667/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1668/// generally assume that values produced by `IntoBytes` will necessarily be
1669/// accepted as valid by `TryFromBytes`.
1670///
1671/// # Safety
1672///
1673/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1674/// or representation of `T`. It merely provides the ability to perform a
1675/// validity check at runtime via methods like [`try_ref_from_bytes`].
1676///
1677/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1678/// Future releases of zerocopy may make backwards-breaking changes to these
1679/// items, including changes that only affect soundness, which may cause code
1680/// which uses those items to silently become unsound.
1681///
1682/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1683/// [github-repo]: https://github.com/google/zerocopy
1684/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1685/// [*valid instance*]: #what-is-a-valid-instance
1686#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1687#[cfg_attr(
1688 not(feature = "derive"),
1689 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1690)]
1691#[cfg_attr(
1692 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1693 diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1694)]
1695pub unsafe trait TryFromBytes {
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe.
    //
    // NOTE(review): judging by its name and `#[doc(hidden)]`, this
    // unimplementable-by-hand item is what restricts `TryFromBytes` impls to
    // the custom derive — confirm against the derive's expansion.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1702
1703 /// Does a given memory range contain a valid instance of `Self`?
1704 ///
1705 /// # Safety
1706 ///
1707 /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1708 /// `*candidate` contains a valid `Self`.
1709 ///
1710 /// # Panics
1711 ///
1712 /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1713 /// `unsafe` code remains sound even in the face of `is_bit_valid`
1714 /// panicking. (We support user-defined validation routines; so long as
1715 /// these routines are not required to be `unsafe`, there is no way to
1716 /// ensure that these do not generate panics.)
1717 ///
1718 /// Besides user-defined validation routines panicking, `is_bit_valid` will
1719 /// either panic or fail to compile if called on a pointer with [`Shared`]
1720 /// aliasing when `Self: !Immutable`.
1721 ///
1722 /// [`UnsafeCell`]: core::cell::UnsafeCell
1723 /// [`Shared`]: invariant::Shared
    #[doc(hidden)]
    // Generic over the alignment invariant `A`, so `candidate` need not be a
    // well-aligned pointer; implementations may only rely on the bounds
    // provided by `invariant::Alignment`.
    fn is_bit_valid<A>(candidate: Maybe<'_, Self, A>) -> bool
    where
        A: invariant::Alignment;
1728
1729 /// Attempts to interpret the given `source` as a `&Self`.
1730 ///
1731 /// If the bytes of `source` are a valid instance of `Self`, this method
1732 /// returns a reference to those bytes interpreted as a `Self`. If the
1733 /// length of `source` is not a [valid size of `Self`][valid-size], or if
1734 /// `source` is not appropriately aligned, or if `source` is not a valid
1735 /// instance of `Self`, this returns `Err`. If [`Self:
1736 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1737 /// error][ConvertError::from].
1738 ///
1739 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1740 ///
1741 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1742 /// [self-unaligned]: Unaligned
1743 /// [slice-dst]: KnownLayout#dynamically-sized-types
1744 ///
1745 /// # Compile-Time Assertions
1746 ///
1747 /// This method cannot yet be used on unsized types whose dynamically-sized
1748 /// component is zero-sized. Attempting to use this method on such types
1749 /// results in a compile-time assertion error; e.g.:
1750 ///
1751 /// ```compile_fail,E0080
1752 /// use zerocopy::*;
1753 /// # use zerocopy_derive::*;
1754 ///
1755 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1756 /// #[repr(C)]
1757 /// struct ZSTy {
1758 /// leading_sized: u16,
1759 /// trailing_dst: [()],
1760 /// }
1761 ///
1762 /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // âš Compile Error!
1763 /// ```
1764 ///
1765 /// # Examples
1766 ///
1767 /// ```
1768 /// use zerocopy::TryFromBytes;
1769 /// # use zerocopy_derive::*;
1770 ///
1771 /// // The only valid value of this type is the byte `0xC0`
1772 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1773 /// #[repr(u8)]
1774 /// enum C0 { xC0 = 0xC0 }
1775 ///
1776 /// // The only valid value of this type is the byte sequence `0xC0C0`.
1777 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1778 /// #[repr(C)]
1779 /// struct C0C0(C0, C0);
1780 ///
1781 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1782 /// #[repr(C)]
1783 /// struct Packet {
1784 /// magic_number: C0C0,
1785 /// mug_size: u8,
1786 /// temperature: u8,
1787 /// marshmallows: [[u8; 2]],
1788 /// }
1789 ///
1790 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1791 ///
1792 /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1793 ///
1794 /// assert_eq!(packet.mug_size, 240);
1795 /// assert_eq!(packet.temperature, 77);
1796 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1797 ///
    /// // These bytes are not a valid instance of `Packet`.
1799 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1800 /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1801 /// ```
1802 ///
1803 #[doc = codegen_section!(
1804 header = "h5",
1805 bench = "try_ref_from_bytes",
1806 format = "coco",
1807 arity = 3,
1808 [
1809 open
1810 @index 1
1811 @title "Sized"
1812 @variant "static_size"
1813 ],
1814 [
1815 @index 2
1816 @title "Unsized"
1817 @variant "dynamic_size"
1818 ],
1819 [
1820 @index 3
1821 @title "Dynamically Padded"
1822 @variant "dynamic_padding"
1823 ]
1824 )]
1825 #[must_use = "has no side effects"]
1826 #[cfg_attr(zerocopy_inline_always, inline(always))]
1827 #[cfg_attr(not(zerocopy_inline_always), inline)]
1828 fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1829 where
1830 Self: KnownLayout + Immutable,
1831 {
1832 static_assert_dst_is_not_zst!(Self);
1833 match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1834 Ok(source) => {
1835 // This call may panic. If that happens, it doesn't cause any soundness
1836 // issues, as we have not generated any invalid state which we need to
1837 // fix before returning.
1838 match source.try_into_valid() {
1839 Ok(valid) => Ok(valid.as_ref()),
1840 Err(e) => {
1841 Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1842 }
1843 }
1844 }
1845 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1846 }
1847 }
1848
1849 /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1850 ///
1851 /// This method computes the [largest possible size of `Self`][valid-size]
1852 /// that can fit in the leading bytes of `source`. If that prefix is a valid
1853 /// instance of `Self`, this method returns a reference to those bytes
1854 /// interpreted as `Self`, and a reference to the remaining bytes. If there
1855 /// are insufficient bytes, or if `source` is not appropriately aligned, or
1856 /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1857 /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1858 /// alignment error][ConvertError::from].
1859 ///
1860 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1861 ///
1862 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1863 /// [self-unaligned]: Unaligned
1864 /// [slice-dst]: KnownLayout#dynamically-sized-types
1865 ///
1866 /// # Compile-Time Assertions
1867 ///
1868 /// This method cannot yet be used on unsized types whose dynamically-sized
1869 /// component is zero-sized. Attempting to use this method on such types
1870 /// results in a compile-time assertion error; e.g.:
1871 ///
1872 /// ```compile_fail,E0080
1873 /// use zerocopy::*;
1874 /// # use zerocopy_derive::*;
1875 ///
1876 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1877 /// #[repr(C)]
1878 /// struct ZSTy {
1879 /// leading_sized: u16,
1880 /// trailing_dst: [()],
1881 /// }
1882 ///
1883 /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // âš Compile Error!
1884 /// ```
1885 ///
1886 /// # Examples
1887 ///
1888 /// ```
1889 /// use zerocopy::TryFromBytes;
1890 /// # use zerocopy_derive::*;
1891 ///
1892 /// // The only valid value of this type is the byte `0xC0`
1893 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1894 /// #[repr(u8)]
1895 /// enum C0 { xC0 = 0xC0 }
1896 ///
1897 /// // The only valid value of this type is the bytes `0xC0C0`.
1898 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1899 /// #[repr(C)]
1900 /// struct C0C0(C0, C0);
1901 ///
1902 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1903 /// #[repr(C)]
1904 /// struct Packet {
1905 /// magic_number: C0C0,
1906 /// mug_size: u8,
1907 /// temperature: u8,
1908 /// marshmallows: [[u8; 2]],
1909 /// }
1910 ///
1911 /// // These are more bytes than are needed to encode a `Packet`.
1912 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1913 ///
1914 /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1915 ///
1916 /// assert_eq!(packet.mug_size, 240);
1917 /// assert_eq!(packet.temperature, 77);
1918 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1919 /// assert_eq!(suffix, &[6u8][..]);
1920 ///
    /// // These bytes are not a valid instance of `Packet`.
1922 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1923 /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1924 /// ```
1925 ///
1926 #[doc = codegen_section!(
1927 header = "h5",
1928 bench = "try_ref_from_prefix",
1929 format = "coco",
1930 arity = 3,
1931 [
1932 open
1933 @index 1
1934 @title "Sized"
1935 @variant "static_size"
1936 ],
1937 [
1938 @index 2
1939 @title "Unsized"
1940 @variant "dynamic_size"
1941 ],
1942 [
1943 @index 3
1944 @title "Dynamically Padded"
1945 @variant "dynamic_padding"
1946 ]
1947 )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Rejects unsized types whose trailing slice element is zero-sized
        // (see "Compile-Time Assertions" in the doc comment above).
        static_assert_dst_is_not_zst!(Self);
        // `None`: no explicit element count; the helper sizes `Self` from the
        // leading bytes of `source`.
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1958
1959 /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1960 ///
1961 /// This method computes the [largest possible size of `Self`][valid-size]
1962 /// that can fit in the trailing bytes of `source`. If that suffix is a
1963 /// valid instance of `Self`, this method returns a reference to those bytes
1964 /// interpreted as `Self`, and a reference to the preceding bytes. If there
1965 /// are insufficient bytes, or if the suffix of `source` would not be
1966 /// appropriately aligned, or if the suffix is not a valid instance of
1967 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1968 /// can [infallibly discard the alignment error][ConvertError::from].
1969 ///
1970 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1971 ///
1972 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1973 /// [self-unaligned]: Unaligned
1974 /// [slice-dst]: KnownLayout#dynamically-sized-types
1975 ///
1976 /// # Compile-Time Assertions
1977 ///
1978 /// This method cannot yet be used on unsized types whose dynamically-sized
1979 /// component is zero-sized. Attempting to use this method on such types
1980 /// results in a compile-time assertion error; e.g.:
1981 ///
1982 /// ```compile_fail,E0080
1983 /// use zerocopy::*;
1984 /// # use zerocopy_derive::*;
1985 ///
1986 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1987 /// #[repr(C)]
1988 /// struct ZSTy {
1989 /// leading_sized: u16,
1990 /// trailing_dst: [()],
1991 /// }
1992 ///
1993 /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // âš Compile Error!
1994 /// ```
1995 ///
1996 /// # Examples
1997 ///
1998 /// ```
1999 /// use zerocopy::TryFromBytes;
2000 /// # use zerocopy_derive::*;
2001 ///
2002 /// // The only valid value of this type is the byte `0xC0`
2003 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2004 /// #[repr(u8)]
2005 /// enum C0 { xC0 = 0xC0 }
2006 ///
2007 /// // The only valid value of this type is the bytes `0xC0C0`.
2008 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2009 /// #[repr(C)]
2010 /// struct C0C0(C0, C0);
2011 ///
2012 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2013 /// #[repr(C)]
2014 /// struct Packet {
2015 /// magic_number: C0C0,
2016 /// mug_size: u8,
2017 /// temperature: u8,
2018 /// marshmallows: [[u8; 2]],
2019 /// }
2020 ///
2021 /// // These are more bytes than are needed to encode a `Packet`.
2022 /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2023 ///
2024 /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
2025 ///
2026 /// assert_eq!(packet.mug_size, 240);
2027 /// assert_eq!(packet.temperature, 77);
2028 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2029 /// assert_eq!(prefix, &[0u8][..]);
2030 ///
    /// // These bytes are not a valid instance of `Packet`.
2032 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2033 /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
2034 /// ```
2035 ///
2036 #[doc = codegen_section!(
2037 header = "h5",
2038 bench = "try_ref_from_suffix",
2039 format = "coco",
2040 arity = 3,
2041 [
2042 open
2043 @index 1
2044 @title "Sized"
2045 @variant "static_size"
2046 ],
2047 [
2048 @index 2
2049 @title "Unsized"
2050 @variant "dynamic_size"
2051 ],
2052 [
2053 @index 3
2054 @title "Dynamically Padded"
2055 @variant "dynamic_padding"
2056 ]
2057 )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Rejects unsized types whose trailing slice element is zero-sized
        // (see "Compile-Time Assertions" in the doc comment above).
        static_assert_dst_is_not_zst!(Self);
        // `.map(swap)` reorders the helper's pair into this method's
        // documented `(prefix, Self)` return order.
        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2068
2069 /// Attempts to interpret the given `source` as a `&mut Self` without
2070 /// copying.
2071 ///
2072 /// If the bytes of `source` are a valid instance of `Self`, this method
2073 /// returns a reference to those bytes interpreted as a `Self`. If the
2074 /// length of `source` is not a [valid size of `Self`][valid-size], or if
2075 /// `source` is not appropriately aligned, or if `source` is not a valid
2076 /// instance of `Self`, this returns `Err`. If [`Self:
2077 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2078 /// error][ConvertError::from].
2079 ///
2080 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2081 ///
2082 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2083 /// [self-unaligned]: Unaligned
2084 /// [slice-dst]: KnownLayout#dynamically-sized-types
2085 ///
2086 /// # Compile-Time Assertions
2087 ///
2088 /// This method cannot yet be used on unsized types whose dynamically-sized
2089 /// component is zero-sized. Attempting to use this method on such types
2090 /// results in a compile-time assertion error; e.g.:
2091 ///
2092 /// ```compile_fail,E0080
2093 /// use zerocopy::*;
2094 /// # use zerocopy_derive::*;
2095 ///
2096 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2097 /// #[repr(C, packed)]
2098 /// struct ZSTy {
2099 /// leading_sized: [u8; 2],
2100 /// trailing_dst: [()],
2101 /// }
2102 ///
2103 /// let mut source = [85, 85];
2104 /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš Compile Error!
2105 /// ```
2106 ///
2107 /// # Examples
2108 ///
2109 /// ```
2110 /// use zerocopy::TryFromBytes;
2111 /// # use zerocopy_derive::*;
2112 ///
2113 /// // The only valid value of this type is the byte `0xC0`
2114 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2115 /// #[repr(u8)]
2116 /// enum C0 { xC0 = 0xC0 }
2117 ///
2118 /// // The only valid value of this type is the bytes `0xC0C0`.
2119 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2120 /// #[repr(C)]
2121 /// struct C0C0(C0, C0);
2122 ///
2123 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2124 /// #[repr(C, packed)]
2125 /// struct Packet {
2126 /// magic_number: C0C0,
2127 /// mug_size: u8,
2128 /// temperature: u8,
2129 /// marshmallows: [[u8; 2]],
2130 /// }
2131 ///
2132 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2133 ///
2134 /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2135 ///
2136 /// assert_eq!(packet.mug_size, 240);
2137 /// assert_eq!(packet.temperature, 77);
2138 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2139 ///
2140 /// packet.temperature = 111;
2141 ///
2142 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2143 ///
    /// // These bytes are not a valid instance of `Packet`.
2145 /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2146 /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2147 /// ```
2148 ///
2149 #[doc = codegen_header!("h5", "try_mut_from_bytes")]
2150 ///
2151 /// See [`TryFromBytes::try_ref_from_bytes`](#method.try_ref_from_bytes.codegen).
2152 #[must_use = "has no side effects"]
2153 #[cfg_attr(zerocopy_inline_always, inline(always))]
2154 #[cfg_attr(not(zerocopy_inline_always), inline)]
2155 fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2156 where
2157 Self: KnownLayout + IntoBytes,
2158 {
2159 static_assert_dst_is_not_zst!(Self);
2160 match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2161 Ok(source) => {
2162 // This call may panic. If that happens, it doesn't cause any soundness
2163 // issues, as we have not generated any invalid state which we need to
2164 // fix before returning.
2165 match source.try_into_valid() {
2166 Ok(source) => Ok(source.as_mut()),
2167 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2168 }
2169 }
2170 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2171 }
2172 }
2173
2174 /// Attempts to interpret the prefix of the given `source` as a `&mut
2175 /// Self`.
2176 ///
2177 /// This method computes the [largest possible size of `Self`][valid-size]
2178 /// that can fit in the leading bytes of `source`. If that prefix is a valid
2179 /// instance of `Self`, this method returns a reference to those bytes
2180 /// interpreted as `Self`, and a reference to the remaining bytes. If there
2181 /// are insufficient bytes, or if `source` is not appropriately aligned, or
2182 /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
2183 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
2184 /// alignment error][ConvertError::from].
2185 ///
2186 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2187 ///
2188 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2189 /// [self-unaligned]: Unaligned
2190 /// [slice-dst]: KnownLayout#dynamically-sized-types
2191 ///
2192 /// # Compile-Time Assertions
2193 ///
2194 /// This method cannot yet be used on unsized types whose dynamically-sized
2195 /// component is zero-sized. Attempting to use this method on such types
2196 /// results in a compile-time assertion error; e.g.:
2197 ///
2198 /// ```compile_fail,E0080
2199 /// use zerocopy::*;
2200 /// # use zerocopy_derive::*;
2201 ///
2202 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2203 /// #[repr(C, packed)]
2204 /// struct ZSTy {
2205 /// leading_sized: [u8; 2],
2206 /// trailing_dst: [()],
2207 /// }
2208 ///
2209 /// let mut source = [85, 85];
2210 /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // âš Compile Error!
2211 /// ```
2212 ///
2213 /// # Examples
2214 ///
2215 /// ```
2216 /// use zerocopy::TryFromBytes;
2217 /// # use zerocopy_derive::*;
2218 ///
2219 /// // The only valid value of this type is the byte `0xC0`
2220 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2221 /// #[repr(u8)]
2222 /// enum C0 { xC0 = 0xC0 }
2223 ///
2224 /// // The only valid value of this type is the bytes `0xC0C0`.
2225 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2226 /// #[repr(C)]
2227 /// struct C0C0(C0, C0);
2228 ///
2229 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2230 /// #[repr(C, packed)]
2231 /// struct Packet {
2232 /// magic_number: C0C0,
2233 /// mug_size: u8,
2234 /// temperature: u8,
2235 /// marshmallows: [[u8; 2]],
2236 /// }
2237 ///
2238 /// // These are more bytes than are needed to encode a `Packet`.
2239 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2240 ///
2241 /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
2242 ///
2243 /// assert_eq!(packet.mug_size, 240);
2244 /// assert_eq!(packet.temperature, 77);
2245 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2246 /// assert_eq!(suffix, &[6u8][..]);
2247 ///
2248 /// packet.temperature = 111;
2249 /// suffix[0] = 222;
2250 ///
2251 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
2252 ///
    /// // These bytes are not a valid instance of `Packet`.
2254 /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2255 /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
2256 /// ```
2257 ///
2258 #[doc = codegen_header!("h5", "try_mut_from_prefix")]
2259 ///
2260 /// See [`TryFromBytes::try_ref_from_prefix`](#method.try_ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        // Rejects unsized types whose trailing slice element is zero-sized
        // (see "Compile-Time Assertions" in the doc comment above).
        static_assert_dst_is_not_zst!(Self);
        // `None`: no explicit element count; the helper sizes `Self` from the
        // leading bytes of `source`.
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2273
2274 /// Attempts to interpret the suffix of the given `source` as a `&mut
2275 /// Self`.
2276 ///
2277 /// This method computes the [largest possible size of `Self`][valid-size]
2278 /// that can fit in the trailing bytes of `source`. If that suffix is a
2279 /// valid instance of `Self`, this method returns a reference to those bytes
2280 /// interpreted as `Self`, and a reference to the preceding bytes. If there
2281 /// are insufficient bytes, or if the suffix of `source` would not be
2282 /// appropriately aligned, or if the suffix is not a valid instance of
2283 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
2284 /// can [infallibly discard the alignment error][ConvertError::from].
2285 ///
2286 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2287 ///
2288 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2289 /// [self-unaligned]: Unaligned
2290 /// [slice-dst]: KnownLayout#dynamically-sized-types
2291 ///
2292 /// # Compile-Time Assertions
2293 ///
2294 /// This method cannot yet be used on unsized types whose dynamically-sized
2295 /// component is zero-sized. Attempting to use this method on such types
2296 /// results in a compile-time assertion error; e.g.:
2297 ///
2298 /// ```compile_fail,E0080
2299 /// use zerocopy::*;
2300 /// # use zerocopy_derive::*;
2301 ///
2302 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2303 /// #[repr(C, packed)]
2304 /// struct ZSTy {
2305 /// leading_sized: u16,
2306 /// trailing_dst: [()],
2307 /// }
2308 ///
2309 /// let mut source = [85, 85];
2310 /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // âš Compile Error!
2311 /// ```
2312 ///
2313 /// # Examples
2314 ///
2315 /// ```
2316 /// use zerocopy::TryFromBytes;
2317 /// # use zerocopy_derive::*;
2318 ///
2319 /// // The only valid value of this type is the byte `0xC0`
2320 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2321 /// #[repr(u8)]
2322 /// enum C0 { xC0 = 0xC0 }
2323 ///
2324 /// // The only valid value of this type is the bytes `0xC0C0`.
2325 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2326 /// #[repr(C)]
2327 /// struct C0C0(C0, C0);
2328 ///
2329 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2330 /// #[repr(C, packed)]
2331 /// struct Packet {
2332 /// magic_number: C0C0,
2333 /// mug_size: u8,
2334 /// temperature: u8,
2335 /// marshmallows: [[u8; 2]],
2336 /// }
2337 ///
2338 /// // These are more bytes than are needed to encode a `Packet`.
2339 /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2340 ///
2341 /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2342 ///
2343 /// assert_eq!(packet.mug_size, 240);
2344 /// assert_eq!(packet.temperature, 77);
2345 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2346 /// assert_eq!(prefix, &[0u8][..]);
2347 ///
2348 /// prefix[0] = 111;
2349 /// packet.temperature = 222;
2350 ///
2351 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2352 ///
    /// // These bytes are not a valid instance of `Packet`.
2354 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2355 /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2356 /// ```
2357 ///
2358 #[doc = codegen_header!("h5", "try_mut_from_suffix")]
2359 ///
2360 /// See [`TryFromBytes::try_ref_from_suffix`](#method.try_ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        // Rejects unsized types whose trailing slice element is zero-sized
        // (see "Compile-Time Assertions" in the doc comment above).
        static_assert_dst_is_not_zst!(Self);
        // `.map(swap)` reorders the helper's pair into this method's
        // documented `(prefix, Self)` return order.
        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2373
2374 /// Attempts to interpret the given `source` as a `&Self` with a DST length
2375 /// equal to `count`.
2376 ///
2377 /// This method attempts to return a reference to `source` interpreted as a
2378 /// `Self` with `count` trailing elements. If the length of `source` is not
2379 /// equal to the size of `Self` with `count` elements, if `source` is not
2380 /// appropriately aligned, or if `source` does not contain a valid instance
2381 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2382 /// you can [infallibly discard the alignment error][ConvertError::from].
2383 ///
2384 /// [self-unaligned]: Unaligned
2385 /// [slice-dst]: KnownLayout#dynamically-sized-types
2386 ///
2387 /// # Examples
2388 ///
2389 /// ```
2390 /// # #![allow(non_camel_case_types)] // For C0::xC0
2391 /// use zerocopy::TryFromBytes;
2392 /// # use zerocopy_derive::*;
2393 ///
2394 /// // The only valid value of this type is the byte `0xC0`
2395 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2396 /// #[repr(u8)]
2397 /// enum C0 { xC0 = 0xC0 }
2398 ///
2399 /// // The only valid value of this type is the bytes `0xC0C0`.
2400 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2401 /// #[repr(C)]
2402 /// struct C0C0(C0, C0);
2403 ///
2404 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2405 /// #[repr(C)]
2406 /// struct Packet {
2407 /// magic_number: C0C0,
2408 /// mug_size: u8,
2409 /// temperature: u8,
2410 /// marshmallows: [[u8; 2]],
2411 /// }
2412 ///
2413 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2414 ///
2415 /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2416 ///
2417 /// assert_eq!(packet.mug_size, 240);
2418 /// assert_eq!(packet.temperature, 77);
2419 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2420 ///
    /// // These bytes are not a valid instance of `Packet`.
2422 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2423 /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2424 /// ```
2425 ///
2426 /// Since an explicit `count` is provided, this method supports types with
2427 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2428 /// which do not take an explicit count do not support such types.
2429 ///
2430 /// ```
2431 /// use core::num::NonZeroU16;
2432 /// use zerocopy::*;
2433 /// # use zerocopy_derive::*;
2434 ///
2435 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2436 /// #[repr(C)]
2437 /// struct ZSTy {
2438 /// leading_sized: NonZeroU16,
2439 /// trailing_dst: [()],
2440 /// }
2441 ///
2442 /// let src = 0xCAFEu16.as_bytes();
2443 /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2444 /// assert_eq!(zsty.trailing_dst.len(), 42);
2445 /// ```
2446 ///
2447 /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2448 ///
2449 #[doc = codegen_section!(
2450 header = "h5",
2451 bench = "try_ref_from_bytes_with_elems",
2452 format = "coco",
2453 arity = 2,
2454 [
2455 open
2456 @index 1
2457 @title "Unsized"
2458 @variant "dynamic_size"
2459 ],
2460 [
2461 @index 2
2462 @title "Dynamically Padded"
2463 @variant "dynamic_padding"
2464 ]
2465 )]
2466 #[must_use = "has no side effects"]
2467 #[cfg_attr(zerocopy_inline_always, inline(always))]
2468 #[cfg_attr(not(zerocopy_inline_always), inline)]
2469 fn try_ref_from_bytes_with_elems(
2470 source: &[u8],
2471 count: usize,
2472 ) -> Result<&Self, TryCastError<&[u8], Self>>
2473 where
2474 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2475 {
2476 match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2477 {
2478 Ok(source) => {
2479 // This call may panic. If that happens, it doesn't cause any soundness
2480 // issues, as we have not generated any invalid state which we need to
2481 // fix before returning.
2482 match source.try_into_valid() {
2483 Ok(source) => Ok(source.as_ref()),
2484 Err(e) => {
2485 Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2486 }
2487 }
2488 }
2489 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2490 }
2491 }
2492
2493 /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2494 /// a DST length equal to `count`.
2495 ///
2496 /// This method attempts to return a reference to the prefix of `source`
2497 /// interpreted as a `Self` with `count` trailing elements, and a reference
2498 /// to the remaining bytes. If the length of `source` is less than the size
2499 /// of `Self` with `count` elements, if `source` is not appropriately
2500 /// aligned, or if the prefix of `source` does not contain a valid instance
2501 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2502 /// you can [infallibly discard the alignment error][ConvertError::from].
2503 ///
2504 /// [self-unaligned]: Unaligned
2505 /// [slice-dst]: KnownLayout#dynamically-sized-types
2506 ///
2507 /// # Examples
2508 ///
2509 /// ```
2510 /// # #![allow(non_camel_case_types)] // For C0::xC0
2511 /// use zerocopy::TryFromBytes;
2512 /// # use zerocopy_derive::*;
2513 ///
2514 /// // The only valid value of this type is the byte `0xC0`
2515 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2516 /// #[repr(u8)]
2517 /// enum C0 { xC0 = 0xC0 }
2518 ///
2519 /// // The only valid value of this type is the bytes `0xC0C0`.
2520 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2521 /// #[repr(C)]
2522 /// struct C0C0(C0, C0);
2523 ///
2524 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2525 /// #[repr(C)]
2526 /// struct Packet {
2527 /// magic_number: C0C0,
2528 /// mug_size: u8,
2529 /// temperature: u8,
2530 /// marshmallows: [[u8; 2]],
2531 /// }
2532 ///
2533 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2534 ///
2535 /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2536 ///
2537 /// assert_eq!(packet.mug_size, 240);
2538 /// assert_eq!(packet.temperature, 77);
2539 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2540 /// assert_eq!(suffix, &[8u8][..]);
2541 ///
    /// // These bytes are not a valid instance of `Packet`.
2543 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2544 /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2545 /// ```
2546 ///
2547 /// Since an explicit `count` is provided, this method supports types with
2548 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2549 /// which do not take an explicit count do not support such types.
2550 ///
2551 /// ```
2552 /// use core::num::NonZeroU16;
2553 /// use zerocopy::*;
2554 /// # use zerocopy_derive::*;
2555 ///
2556 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2557 /// #[repr(C)]
2558 /// struct ZSTy {
2559 /// leading_sized: NonZeroU16,
2560 /// trailing_dst: [()],
2561 /// }
2562 ///
2563 /// let src = 0xCAFEu16.as_bytes();
2564 /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2565 /// assert_eq!(zsty.trailing_dst.len(), 42);
2566 /// ```
2567 ///
2568 /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2569 ///
2570 #[doc = codegen_section!(
2571 header = "h5",
2572 bench = "try_ref_from_prefix_with_elems",
2573 format = "coco",
2574 arity = 2,
2575 [
2576 open
2577 @index 1
2578 @title "Unsized"
2579 @variant "dynamic_size"
2580 ],
2581 [
2582 @index 2
2583 @title "Dynamically Padded"
2584 @variant "dynamic_padding"
2585 ]
2586 )]
2587 #[must_use = "has no side effects"]
2588 #[cfg_attr(zerocopy_inline_always, inline(always))]
2589 #[cfg_attr(not(zerocopy_inline_always), inline)]
2590 fn try_ref_from_prefix_with_elems(
2591 source: &[u8],
2592 count: usize,
2593 ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2594 where
2595 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2596 {
2597 try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2598 }
2599
2600 /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2601 /// a DST length equal to `count`.
2602 ///
2603 /// This method attempts to return a reference to the suffix of `source`
2604 /// interpreted as a `Self` with `count` trailing elements, and a reference
2605 /// to the preceding bytes. If the length of `source` is less than the size
2606 /// of `Self` with `count` elements, if the suffix of `source` is not
2607 /// appropriately aligned, or if the suffix of `source` does not contain a
2608 /// valid instance of `Self`, this returns `Err`. If [`Self:
2609 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2610 /// error][ConvertError::from].
2611 ///
2612 /// [self-unaligned]: Unaligned
2613 /// [slice-dst]: KnownLayout#dynamically-sized-types
2614 ///
2615 /// # Examples
2616 ///
2617 /// ```
2618 /// # #![allow(non_camel_case_types)] // For C0::xC0
2619 /// use zerocopy::TryFromBytes;
2620 /// # use zerocopy_derive::*;
2621 ///
2622 /// // The only valid value of this type is the byte `0xC0`
2623 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2624 /// #[repr(u8)]
2625 /// enum C0 { xC0 = 0xC0 }
2626 ///
2627 /// // The only valid value of this type is the bytes `0xC0C0`.
2628 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2629 /// #[repr(C)]
2630 /// struct C0C0(C0, C0);
2631 ///
2632 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2633 /// #[repr(C)]
2634 /// struct Packet {
2635 /// magic_number: C0C0,
2636 /// mug_size: u8,
2637 /// temperature: u8,
2638 /// marshmallows: [[u8; 2]],
2639 /// }
2640 ///
2641 /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2642 ///
2643 /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2644 ///
2645 /// assert_eq!(packet.mug_size, 240);
2646 /// assert_eq!(packet.temperature, 77);
2647 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2648 /// assert_eq!(prefix, &[123u8][..]);
2649 ///
2650 /// // These bytes are not valid instance of `Packet`.
2651 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2652 /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2653 /// ```
2654 ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = 0xCAFEu16.as_bytes();
    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2677 ///
2678 #[doc = codegen_section!(
2679 header = "h5",
2680 bench = "try_ref_from_suffix_with_elems",
2681 format = "coco",
2682 arity = 2,
2683 [
2684 open
2685 @index 1
2686 @title "Unsized"
2687 @variant "dynamic_size"
2688 ],
2689 [
2690 @index 2
2691 @title "Dynamically Padded"
2692 @variant "dynamic_padding"
2693 ]
2694 )]
2695 #[must_use = "has no side effects"]
2696 #[cfg_attr(zerocopy_inline_always, inline(always))]
2697 #[cfg_attr(not(zerocopy_inline_always), inline)]
2698 fn try_ref_from_suffix_with_elems(
2699 source: &[u8],
2700 count: usize,
2701 ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2702 where
2703 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2704 {
2705 try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2706 }
2707
2708 /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2709 /// length equal to `count`.
2710 ///
2711 /// This method attempts to return a reference to `source` interpreted as a
2712 /// `Self` with `count` trailing elements. If the length of `source` is not
2713 /// equal to the size of `Self` with `count` elements, if `source` is not
2714 /// appropriately aligned, or if `source` does not contain a valid instance
2715 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2716 /// you can [infallibly discard the alignment error][ConvertError::from].
2717 ///
2718 /// [self-unaligned]: Unaligned
2719 /// [slice-dst]: KnownLayout#dynamically-sized-types
2720 ///
2721 /// # Examples
2722 ///
2723 /// ```
2724 /// # #![allow(non_camel_case_types)] // For C0::xC0
2725 /// use zerocopy::TryFromBytes;
2726 /// # use zerocopy_derive::*;
2727 ///
2728 /// // The only valid value of this type is the byte `0xC0`
2729 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2730 /// #[repr(u8)]
2731 /// enum C0 { xC0 = 0xC0 }
2732 ///
2733 /// // The only valid value of this type is the bytes `0xC0C0`.
2734 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2735 /// #[repr(C)]
2736 /// struct C0C0(C0, C0);
2737 ///
2738 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2739 /// #[repr(C, packed)]
2740 /// struct Packet {
2741 /// magic_number: C0C0,
2742 /// mug_size: u8,
2743 /// temperature: u8,
2744 /// marshmallows: [[u8; 2]],
2745 /// }
2746 ///
2747 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2748 ///
2749 /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2750 ///
2751 /// assert_eq!(packet.mug_size, 240);
2752 /// assert_eq!(packet.temperature, 77);
2753 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2754 ///
2755 /// packet.temperature = 111;
2756 ///
2757 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2758 ///
2759 /// // These bytes are not valid instance of `Packet`.
2760 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2761 /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2762 /// ```
2763 ///
2764 /// Since an explicit `count` is provided, this method supports types with
2765 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2766 /// which do not take an explicit count do not support such types.
2767 ///
2768 /// ```
2769 /// use core::num::NonZeroU16;
2770 /// use zerocopy::*;
2771 /// # use zerocopy_derive::*;
2772 ///
2773 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2774 /// #[repr(C, packed)]
2775 /// struct ZSTy {
2776 /// leading_sized: NonZeroU16,
2777 /// trailing_dst: [()],
2778 /// }
2779 ///
2780 /// let mut src = 0xCAFEu16;
2781 /// let src = src.as_mut_bytes();
2782 /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2783 /// assert_eq!(zsty.trailing_dst.len(), 42);
2784 /// ```
2785 ///
2786 /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2787 ///
2788 #[doc = codegen_header!("h5", "try_mut_from_bytes_with_elems")]
2789 ///
2790 /// See [`TryFromBytes::try_ref_from_bytes_with_elems`](#method.try_ref_from_bytes_with_elems.codegen).
2791 #[must_use = "has no side effects"]
2792 #[cfg_attr(zerocopy_inline_always, inline(always))]
2793 #[cfg_attr(not(zerocopy_inline_always), inline)]
2794 fn try_mut_from_bytes_with_elems(
2795 source: &mut [u8],
2796 count: usize,
2797 ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2798 where
2799 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2800 {
2801 match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2802 {
2803 Ok(source) => {
2804 // This call may panic. If that happens, it doesn't cause any soundness
2805 // issues, as we have not generated any invalid state which we need to
2806 // fix before returning.
2807 match source.try_into_valid() {
2808 Ok(source) => Ok(source.as_mut()),
2809 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2810 }
2811 }
2812 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2813 }
2814 }
2815
2816 /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2817 /// with a DST length equal to `count`.
2818 ///
2819 /// This method attempts to return a reference to the prefix of `source`
2820 /// interpreted as a `Self` with `count` trailing elements, and a reference
2821 /// to the remaining bytes. If the length of `source` is less than the size
2822 /// of `Self` with `count` elements, if `source` is not appropriately
2823 /// aligned, or if the prefix of `source` does not contain a valid instance
2824 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2825 /// you can [infallibly discard the alignment error][ConvertError::from].
2826 ///
2827 /// [self-unaligned]: Unaligned
2828 /// [slice-dst]: KnownLayout#dynamically-sized-types
2829 ///
2830 /// # Examples
2831 ///
2832 /// ```
2833 /// # #![allow(non_camel_case_types)] // For C0::xC0
2834 /// use zerocopy::TryFromBytes;
2835 /// # use zerocopy_derive::*;
2836 ///
2837 /// // The only valid value of this type is the byte `0xC0`
2838 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2839 /// #[repr(u8)]
2840 /// enum C0 { xC0 = 0xC0 }
2841 ///
2842 /// // The only valid value of this type is the bytes `0xC0C0`.
2843 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2844 /// #[repr(C)]
2845 /// struct C0C0(C0, C0);
2846 ///
2847 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2848 /// #[repr(C, packed)]
2849 /// struct Packet {
2850 /// magic_number: C0C0,
2851 /// mug_size: u8,
2852 /// temperature: u8,
2853 /// marshmallows: [[u8; 2]],
2854 /// }
2855 ///
2856 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2857 ///
2858 /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2859 ///
2860 /// assert_eq!(packet.mug_size, 240);
2861 /// assert_eq!(packet.temperature, 77);
2862 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2863 /// assert_eq!(suffix, &[8u8][..]);
2864 ///
2865 /// packet.temperature = 111;
2866 /// suffix[0] = 222;
2867 ///
2868 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2869 ///
2870 /// // These bytes are not valid instance of `Packet`.
2871 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2872 /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2873 /// ```
2874 ///
2875 /// Since an explicit `count` is provided, this method supports types with
2876 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2877 /// which do not take an explicit count do not support such types.
2878 ///
2879 /// ```
2880 /// use core::num::NonZeroU16;
2881 /// use zerocopy::*;
2882 /// # use zerocopy_derive::*;
2883 ///
2884 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2885 /// #[repr(C, packed)]
2886 /// struct ZSTy {
2887 /// leading_sized: NonZeroU16,
2888 /// trailing_dst: [()],
2889 /// }
2890 ///
2891 /// let mut src = 0xCAFEu16;
2892 /// let src = src.as_mut_bytes();
2893 /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2894 /// assert_eq!(zsty.trailing_dst.len(), 42);
2895 /// ```
2896 ///
2897 /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2898 ///
2899 #[doc = codegen_header!("h5", "try_mut_from_prefix_with_elems")]
2900 ///
2901 /// See [`TryFromBytes::try_ref_from_prefix_with_elems`](#method.try_ref_from_prefix_with_elems.codegen).
2902 #[must_use = "has no side effects"]
2903 #[cfg_attr(zerocopy_inline_always, inline(always))]
2904 #[cfg_attr(not(zerocopy_inline_always), inline)]
2905 fn try_mut_from_prefix_with_elems(
2906 source: &mut [u8],
2907 count: usize,
2908 ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2909 where
2910 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2911 {
2912 try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2913 }
2914
2915 /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2916 /// with a DST length equal to `count`.
2917 ///
2918 /// This method attempts to return a reference to the suffix of `source`
2919 /// interpreted as a `Self` with `count` trailing elements, and a reference
2920 /// to the preceding bytes. If the length of `source` is less than the size
2921 /// of `Self` with `count` elements, if the suffix of `source` is not
2922 /// appropriately aligned, or if the suffix of `source` does not contain a
2923 /// valid instance of `Self`, this returns `Err`. If [`Self:
2924 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2925 /// error][ConvertError::from].
2926 ///
2927 /// [self-unaligned]: Unaligned
2928 /// [slice-dst]: KnownLayout#dynamically-sized-types
2929 ///
2930 /// # Examples
2931 ///
2932 /// ```
2933 /// # #![allow(non_camel_case_types)] // For C0::xC0
2934 /// use zerocopy::TryFromBytes;
2935 /// # use zerocopy_derive::*;
2936 ///
2937 /// // The only valid value of this type is the byte `0xC0`
2938 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2939 /// #[repr(u8)]
2940 /// enum C0 { xC0 = 0xC0 }
2941 ///
2942 /// // The only valid value of this type is the bytes `0xC0C0`.
2943 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2944 /// #[repr(C)]
2945 /// struct C0C0(C0, C0);
2946 ///
2947 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2948 /// #[repr(C, packed)]
2949 /// struct Packet {
2950 /// magic_number: C0C0,
2951 /// mug_size: u8,
2952 /// temperature: u8,
2953 /// marshmallows: [[u8; 2]],
2954 /// }
2955 ///
2956 /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2957 ///
2958 /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2959 ///
2960 /// assert_eq!(packet.mug_size, 240);
2961 /// assert_eq!(packet.temperature, 77);
2962 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2963 /// assert_eq!(prefix, &[123u8][..]);
2964 ///
2965 /// prefix[0] = 111;
2966 /// packet.temperature = 222;
2967 ///
2968 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2969 ///
2970 /// // These bytes are not valid instance of `Packet`.
2971 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2972 /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2973 /// ```
2974 ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut src = 0xCAFEu16;
    /// let src = src.as_mut_bytes();
    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2998 ///
2999 #[doc = codegen_header!("h5", "try_mut_from_suffix_with_elems")]
3000 ///
3001 /// See [`TryFromBytes::try_ref_from_suffix_with_elems`](#method.try_ref_from_suffix_with_elems.codegen).
3002 #[must_use = "has no side effects"]
3003 #[cfg_attr(zerocopy_inline_always, inline(always))]
3004 #[cfg_attr(not(zerocopy_inline_always), inline)]
3005 fn try_mut_from_suffix_with_elems(
3006 source: &mut [u8],
3007 count: usize,
3008 ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
3009 where
3010 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
3011 {
3012 try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
3013 }
3014
3015 /// Attempts to read the given `source` as a `Self`.
3016 ///
3017 /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
3018 /// instance of `Self`, this returns `Err`.
3019 ///
3020 /// # Examples
3021 ///
3022 /// ```
3023 /// use zerocopy::TryFromBytes;
3024 /// # use zerocopy_derive::*;
3025 ///
3026 /// // The only valid value of this type is the byte `0xC0`
3027 /// #[derive(TryFromBytes)]
3028 /// #[repr(u8)]
3029 /// enum C0 { xC0 = 0xC0 }
3030 ///
3031 /// // The only valid value of this type is the bytes `0xC0C0`.
3032 /// #[derive(TryFromBytes)]
3033 /// #[repr(C)]
3034 /// struct C0C0(C0, C0);
3035 ///
3036 /// #[derive(TryFromBytes)]
3037 /// #[repr(C)]
3038 /// struct Packet {
3039 /// magic_number: C0C0,
3040 /// mug_size: u8,
3041 /// temperature: u8,
3042 /// }
3043 ///
3044 /// let bytes = &[0xC0, 0xC0, 240, 77][..];
3045 ///
3046 /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
3047 ///
3048 /// assert_eq!(packet.mug_size, 240);
3049 /// assert_eq!(packet.temperature, 77);
3050 ///
3051 /// // These bytes are not valid instance of `Packet`.
3052 /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
3053 /// assert!(Packet::try_read_from_bytes(bytes).is_err());
3054 /// ```
3055 ///
3056 /// # Performance Considerations
3057 ///
3058 /// In this version of zerocopy, this method reads the `source` into a
3059 /// well-aligned stack allocation and *then* validates that the allocation
3060 /// is a valid `Self`. This ensures that validation can be performed using
3061 /// aligned reads (which carry a performance advantage over unaligned reads
3062 /// on many platforms) at the cost of an unconditional copy.
3063 ///
3064 #[doc = codegen_section!(
3065 header = "h5",
3066 bench = "try_read_from_bytes",
3067 format = "coco_static_size",
3068 )]
3069 #[must_use = "has no side effects"]
3070 #[cfg_attr(zerocopy_inline_always, inline(always))]
3071 #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy `source` into an aligned, stack-allocated `MaybeUninit<Self>`.
        // This fails with a size error unless `source.len()` is exactly
        // `size_of::<Self>()`.
        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
3088
3089 /// Attempts to read a `Self` from the prefix of the given `source`.
3090 ///
3091 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
3092 /// of `source`, returning that `Self` and any remaining bytes. If
3093 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
3094 /// of `Self`, it returns `Err`.
3095 ///
3096 /// # Examples
3097 ///
3098 /// ```
3099 /// use zerocopy::TryFromBytes;
3100 /// # use zerocopy_derive::*;
3101 ///
3102 /// // The only valid value of this type is the byte `0xC0`
3103 /// #[derive(TryFromBytes)]
3104 /// #[repr(u8)]
3105 /// enum C0 { xC0 = 0xC0 }
3106 ///
3107 /// // The only valid value of this type is the bytes `0xC0C0`.
3108 /// #[derive(TryFromBytes)]
3109 /// #[repr(C)]
3110 /// struct C0C0(C0, C0);
3111 ///
3112 /// #[derive(TryFromBytes)]
3113 /// #[repr(C)]
3114 /// struct Packet {
3115 /// magic_number: C0C0,
3116 /// mug_size: u8,
3117 /// temperature: u8,
3118 /// }
3119 ///
3120 /// // These are more bytes than are needed to encode a `Packet`.
3121 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3122 ///
3123 /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
3124 ///
3125 /// assert_eq!(packet.mug_size, 240);
3126 /// assert_eq!(packet.temperature, 77);
3127 /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
3128 ///
3129 /// // These bytes are not valid instance of `Packet`.
3130 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3131 /// assert!(Packet::try_read_from_prefix(bytes).is_err());
3132 /// ```
3133 ///
3134 /// # Performance Considerations
3135 ///
3136 /// In this version of zerocopy, this method reads the `source` into a
3137 /// well-aligned stack allocation and *then* validates that the allocation
3138 /// is a valid `Self`. This ensures that validation can be performed using
3139 /// aligned reads (which carry a performance advantage over unaligned reads
3140 /// on many platforms) at the cost of an unconditional copy.
3141 ///
3142 #[doc = codegen_section!(
3143 header = "h5",
3144 bench = "try_read_from_prefix",
3145 format = "coco_static_size",
3146 )]
3147 #[must_use = "has no side effects"]
3148 #[cfg_attr(zerocopy_inline_always, inline(always))]
3149 #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the leading `size_of::<Self>()` bytes of `source` into an
        // aligned, stack-allocated `MaybeUninit<Self>`, keeping the remainder
        // as `suffix`. This fails with a size error if `source` is too short.
        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
    }
3166
3167 /// Attempts to read a `Self` from the suffix of the given `source`.
3168 ///
3169 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
3170 /// of `source`, returning that `Self` and any preceding bytes. If
3171 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
3172 /// of `Self`, it returns `Err`.
3173 ///
3174 /// # Examples
3175 ///
3176 /// ```
3177 /// # #![allow(non_camel_case_types)] // For C0::xC0
3178 /// use zerocopy::TryFromBytes;
3179 /// # use zerocopy_derive::*;
3180 ///
3181 /// // The only valid value of this type is the byte `0xC0`
3182 /// #[derive(TryFromBytes)]
3183 /// #[repr(u8)]
3184 /// enum C0 { xC0 = 0xC0 }
3185 ///
3186 /// // The only valid value of this type is the bytes `0xC0C0`.
3187 /// #[derive(TryFromBytes)]
3188 /// #[repr(C)]
3189 /// struct C0C0(C0, C0);
3190 ///
3191 /// #[derive(TryFromBytes)]
3192 /// #[repr(C)]
3193 /// struct Packet {
3194 /// magic_number: C0C0,
3195 /// mug_size: u8,
3196 /// temperature: u8,
3197 /// }
3198 ///
3199 /// // These are more bytes than are needed to encode a `Packet`.
3200 /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
3201 ///
3202 /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
3203 ///
3204 /// assert_eq!(packet.mug_size, 240);
3205 /// assert_eq!(packet.temperature, 77);
3206 /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3207 ///
3208 /// // These bytes are not valid instance of `Packet`.
3209 /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
3210 /// assert!(Packet::try_read_from_suffix(bytes).is_err());
3211 /// ```
3212 ///
3213 /// # Performance Considerations
3214 ///
3215 /// In this version of zerocopy, this method reads the `source` into a
3216 /// well-aligned stack allocation and *then* validates that the allocation
3217 /// is a valid `Self`. This ensures that validation can be performed using
3218 /// aligned reads (which carry a performance advantage over unaligned reads
3219 /// on many platforms) at the cost of an unconditional copy.
3220 ///
3221 #[doc = codegen_section!(
3222 header = "h5",
3223 bench = "try_read_from_suffix",
3224 format = "coco_static_size",
3225 )]
3226 #[must_use = "has no side effects"]
3227 #[cfg_attr(zerocopy_inline_always, inline(always))]
3228 #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the trailing `size_of::<Self>()` bytes of `source` into an
        // aligned, stack-allocated `MaybeUninit<Self>`, keeping the preceding
        // bytes as `prefix`. This fails with a size error if `source` is too
        // short.
        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
3245}
3246
3247#[inline(always)]
3248fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
3249 source: &[u8],
3250 cast_type: CastType,
3251 meta: Option<T::PointerMetadata>,
3252) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
3253 match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
3254 Ok((source, prefix_suffix)) => {
3255 // This call may panic. If that happens, it doesn't cause any soundness
3256 // issues, as we have not generated any invalid state which we need to
3257 // fix before returning.
3258 match source.try_into_valid() {
3259 Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
3260 Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
3261 }
3262 }
3263 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
3264 }
3265}
3266
3267#[inline(always)]
3268fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
3269 candidate: &mut [u8],
3270 cast_type: CastType,
3271 meta: Option<T::PointerMetadata>,
3272) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
3273 match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
3274 Ok((candidate, prefix_suffix)) => {
3275 // This call may panic. If that happens, it doesn't cause any soundness
3276 // issues, as we have not generated any invalid state which we need to
3277 // fix before returning.
3278 match candidate.try_into_valid() {
3279 Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
3280 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
3281 }
3282 }
3283 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
3284 }
3285}
3286
/// Reverses the order of a two-element tuple.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
3291
/// Validates that the initialized bytes of `candidate` are a bit-valid `T`,
/// returning the `T` on success or a validity error wrapping `source` on
/// failure.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized. Since
    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
    // no values written to an `Initialized` `c_ptr` can violate its validity.
    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
    // via `c_ptr` so long as it is live, so we don't need to worry about the
    // fact that `c_ptr` may have more restricted validity than `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // Compile-time assertion that `Wrapping<T>` and `T` are mutually
    // transmutable, justifying the `assume_init` below.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3337
3338/// Types for which a sequence of `0` bytes is a valid instance.
3339///
3340/// Any memory region of the appropriate length which is guaranteed to contain
3341/// only zero bytes can be viewed as any `FromZeros` type with no runtime
3342/// overhead. This is useful whenever memory is known to be in a zeroed state,
3343/// such memory returned from some allocation routines.
3344///
3345/// # Warning: Padding bytes
3346///
3347/// Note that, when a value is moved or copied, only the non-padding bytes of
3348/// that value are guaranteed to be preserved. It is unsound to assume that
3349/// values written to padding bytes are preserved after a move or copy. For more
3350/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
3351///
3352/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
3353///
3354/// # Implementation
3355///
3356/// **Do not implement this trait yourself!** Instead, use
3357/// [`#[derive(FromZeros)]`][derive]; e.g.:
3358///
3359/// ```
3360/// # use zerocopy_derive::{FromZeros, Immutable};
3361/// #[derive(FromZeros)]
3362/// struct MyStruct {
3363/// # /*
3364/// ...
3365/// # */
3366/// }
3367///
3368/// #[derive(FromZeros)]
3369/// #[repr(u8)]
3370/// enum MyEnum {
3371/// # Variant0,
3372/// # /*
3373/// ...
3374/// # */
3375/// }
3376///
3377/// #[derive(FromZeros, Immutable)]
3378/// union MyUnion {
3379/// # variant: u8,
3380/// # /*
3381/// ...
3382/// # */
3383/// }
3384/// ```
3385///
3386/// This derive performs a sophisticated, compile-time safety analysis to
3387/// determine whether a type is `FromZeros`.
3388///
3389/// # Safety
3390///
3391/// *This section describes what is required in order for `T: FromZeros`, and
3392/// what unsafe code may assume of such types. If you don't plan on implementing
3393/// `FromZeros` manually, and you don't plan on writing unsafe code that
3394/// operates on `FromZeros` types, then you don't need to read this section.*
3395///
3396/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
3397/// `T` whose bytes are all initialized to zero. If a type is marked as
3398/// `FromZeros` which violates this contract, it may cause undefined behavior.
3399///
3400/// `#[derive(FromZeros)]` only permits [types which satisfy these
3401/// requirements][derive-analysis].
3402///
3403#[cfg_attr(
3404 feature = "derive",
3405 doc = "[derive]: zerocopy_derive::FromZeros",
3406 doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
3407)]
3408#[cfg_attr(
3409 not(feature = "derive"),
3410 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
3411 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
3412)]
3413#[cfg_attr(
3414 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3415 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
3416)]
pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Overwrites `self` with zeros.
    ///
    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
    /// drop the current value and replace it with a new one — it simply
    /// modifies the bytes of the existing value.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: 100u16.to_be_bytes(),
    ///     dst_port: 200u16.to_be_bytes(),
    ///     length: 300u16.to_be_bytes(),
    ///     checksum: 400u16.to_be_bytes(),
    /// };
    ///
    /// header.zero();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "zero",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[inline(always)]
    fn zero(&mut self) {
        // Coerce `&mut Self` to a raw pointer. `Self` may be unsized, so the
        // referent size is computed dynamically via `size_of_val` below.
        let slf: *mut Self = self;
        let len = mem::size_of_val(self);
        // SAFETY:
        // - `self` is guaranteed by the type system to be valid for writes of
        //   size `size_of_val(self)`.
        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
        //   as required by `u8`.
        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
        //   of `Self.`
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
    }

    /// Creates an instance of `Self` from zeroed bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header: PacketHeader = FromZeros::new_zeroed();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "new_zeroed",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
        unsafe { mem::zeroed() }
    }

    /// Creates a `Box<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values on the heap and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
    /// storing `[u8; 1048576]` in a temporary variable on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
    /// have performance benefits.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is guaranteed
    /// never to cause a panic or an abort.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "new_box_zeroed",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `T` is a ZST, then return a proper boxed instance of it. There is
        // no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // NOTE(review): the early return above ensures `layout.size() != 0`,
        // which is `alloc_zeroed`'s documented requirement on its layout.
        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        if ptr.is_null() {
            return Err(AllocError);
        }
        // NOTE(review): `ptr` was produced by the global allocator with
        // `Self`'s layout and refers to `size_of::<Self>()` zeroed bytes,
        // which is a valid `Self` because `Self: FromZeros`.
        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }

    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "new_box_zeroed_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }

    // Deprecated alias: forwards to `<[Self]>::new_box_zeroed_with_elems`.
    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len)
    }

    /// Creates a `Vec<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `Vec`s and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
    /// heap; it does not require storing intermediate values on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "new_vec_zeroed",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline(always)]
    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
    where
        Self: Sized,
    {
        // Allocate as a boxed slice first, then convert; `Box<[Self]>` ->
        // `Vec<Self>` is a no-copy conversion.
        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
    }

    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
    /// the vector. The new items are initialized with zeros.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "extend_vec_zeroed",
        format = "coco_static_size",
    )]
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline(always)]
    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
        // panic condition is not satisfied.
        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
    }

    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "insert_vec_zeroed",
        format = "coco_static_size",
    )]
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy: shift the tail
            // (`v.len() - position` elements) up by `additional` slots to make
            // room. `copy_to` has `ptr::copy` (memmove) semantics, so the
            // overlap is fine.
            let ptr = v.as_mut_ptr();
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            // Zero-fill the gap; the all-zeros byte pattern is a valid `Self`
            // because `Self: FromZeros`.
            ptr.add(position).write_bytes(0, additional);
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
}
3780
3781/// Analyzes whether a type is [`FromBytes`].
3782///
3783/// This derive analyzes, at compile time, whether the annotated type satisfies
3784/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3785/// supertraits if it is sound to do so. This derive can be applied to structs,
3786/// enums, and unions;
3787/// e.g.:
3788///
3789/// ```
3790/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3791/// #[derive(FromBytes)]
3792/// struct MyStruct {
3793/// # /*
3794/// ...
3795/// # */
3796/// }
3797///
3798/// #[derive(FromBytes)]
3799/// #[repr(u8)]
3800/// enum MyEnum {
3801/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3802/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3803/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3804/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3805/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3806/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3807/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3808/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3809/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3810/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3811/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3812/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3813/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3814/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3815/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3816/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3817/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3818/// # VFF,
3819/// # /*
3820/// ...
3821/// # */
3822/// }
3823///
3824/// #[derive(FromBytes, Immutable)]
3825/// union MyUnion {
3826/// # variant: u8,
3827/// # /*
3828/// ...
3829/// # */
3830/// }
3831/// ```
3832///
3833/// [safety conditions]: trait@FromBytes#safety
3834///
3835/// # Analysis
3836///
3837/// *This section describes, roughly, the analysis performed by this derive to
3838/// determine whether it is sound to implement `FromBytes` for a given type.
3839/// Unless you are modifying the implementation of this derive, or attempting to
3840/// manually implement `FromBytes` for a type yourself, you don't need to read
3841/// this section.*
3842///
3843/// If a type has the following properties, then this derive can implement
3844/// `FromBytes` for that type:
3845///
3846/// - If the type is a struct, all of its fields must be `FromBytes`.
3847/// - If the type is an enum:
3848/// - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3849/// or `i16`.
3850/// - The maximum number of discriminants must be used (so that every possible
3851/// bit pattern is a valid one).
3852/// - Its fields must be `FromBytes`.
3853///
3854/// This analysis is subject to change. Unsafe code may *only* rely on the
3855/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3856/// implementation details of this derive.
3857///
3858/// ## Why isn't an explicit representation required for structs?
3859///
3860/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3861/// that structs are marked with `#[repr(C)]`.
3862///
/// Per the [Rust reference][reference],
3864///
3865/// > The representation of a type can change the padding between fields, but
3866/// > does not change the layout of the fields themselves.
3867///
3868/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3869///
3870/// Since the layout of structs only consists of padding bytes and field bytes,
3871/// a struct is soundly `FromBytes` if:
3872/// 1. its padding is soundly `FromBytes`, and
3873/// 2. its fields are soundly `FromBytes`.
3874///
/// The first condition always holds: padding bytes do not have
3876/// any validity constraints. A [discussion] of this question in the Unsafe Code
3877/// Guidelines Working Group concluded that it would be virtually unimaginable
3878/// for future versions of rustc to add validity constraints to padding bytes.
3879///
3880/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3881///
3882/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3883/// its fields are `FromBytes`.
3884#[cfg(any(feature = "derive", test))]
3885#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3886pub use zerocopy_derive::FromBytes;
3887
3888/// Types for which any bit pattern is valid.
3889///
3890/// Any memory region of the appropriate length which contains initialized bytes
3891/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3892/// useful for efficiently parsing bytes as structured data.
3893///
3894/// # Warning: Padding bytes
3895///
3896/// Note that, when a value is moved or copied, only the non-padding bytes of
3897/// that value are guaranteed to be preserved. It is unsound to assume that
3898/// values written to padding bytes are preserved after a move or copy. For
3899/// example, the following is unsound:
3900///
3901/// ```rust,no_run
3902/// use core::mem::{size_of, transmute};
3903/// use zerocopy::FromZeros;
3904/// # use zerocopy_derive::*;
3905///
3906/// // Assume `Foo` is a type with padding bytes.
3907/// #[derive(FromZeros, Default)]
3908/// struct Foo {
3909/// # /*
3910/// ...
3911/// # */
3912/// }
3913///
3914/// let mut foo: Foo = Foo::default();
3915/// FromZeros::zero(&mut foo);
3916/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3917/// // those writes are not guaranteed to be preserved in padding bytes when
3918/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3919/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3920/// ```
3921///
3922/// # Implementation
3923///
3924/// **Do not implement this trait yourself!** Instead, use
3925/// [`#[derive(FromBytes)]`][derive]; e.g.:
3926///
3927/// ```
3928/// # use zerocopy_derive::{FromBytes, Immutable};
3929/// #[derive(FromBytes)]
3930/// struct MyStruct {
3931/// # /*
3932/// ...
3933/// # */
3934/// }
3935///
3936/// #[derive(FromBytes)]
3937/// #[repr(u8)]
3938/// enum MyEnum {
3939/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3940/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3941/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3942/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3943/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3944/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3945/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3946/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3947/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3948/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3949/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3950/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3951/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3952/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3953/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3954/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3955/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3956/// # VFF,
3957/// # /*
3958/// ...
3959/// # */
3960/// }
3961///
3962/// #[derive(FromBytes, Immutable)]
3963/// union MyUnion {
3964/// # variant: u8,
3965/// # /*
3966/// ...
3967/// # */
3968/// }
3969/// ```
3970///
3971/// This derive performs a sophisticated, compile-time safety analysis to
3972/// determine whether a type is `FromBytes`.
3973///
3974/// # Safety
3975///
3976/// *This section describes what is required in order for `T: FromBytes`, and
3977/// what unsafe code may assume of such types. If you don't plan on implementing
3978/// `FromBytes` manually, and you don't plan on writing unsafe code that
3979/// operates on `FromBytes` types, then you don't need to read this section.*
3980///
3981/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3982/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3983/// words, any byte value which is not uninitialized). If a type is marked as
3984/// `FromBytes` which violates this contract, it may cause undefined behavior.
3985///
3986/// `#[derive(FromBytes)]` only permits [types which satisfy these
3987/// requirements][derive-analysis].
3988///
3989#[cfg_attr(
3990 feature = "derive",
3991 doc = "[derive]: zerocopy_derive::FromBytes",
3992 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3993)]
3994#[cfg_attr(
3995 not(feature = "derive"),
3996 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3997 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3998)]
3999#[cfg_attr(
4000 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
4001 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
4002)]
4003pub unsafe trait FromBytes: FromZeros {
4004 // The `Self: Sized` bound makes it so that `FromBytes` is still object
4005 // safe.
4006 #[doc(hidden)]
4007 fn only_derive_is_allowed_to_implement_this_trait()
4008 where
4009 Self: Sized;
4010
4011 /// Interprets the given `source` as a `&Self`.
4012 ///
4013 /// This method attempts to return a reference to `source` interpreted as a
4014 /// `Self`. If the length of `source` is not a [valid size of
4015 /// `Self`][valid-size], or if `source` is not appropriately aligned, this
4016 /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
4017 /// [infallibly discard the alignment error][size-error-from].
4018 ///
4019 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4020 ///
4021 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4022 /// [self-unaligned]: Unaligned
4023 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4024 /// [slice-dst]: KnownLayout#dynamically-sized-types
4025 ///
4026 /// # Compile-Time Assertions
4027 ///
4028 /// This method cannot yet be used on unsized types whose dynamically-sized
4029 /// component is zero-sized. Attempting to use this method on such types
4030 /// results in a compile-time assertion error; e.g.:
4031 ///
4032 /// ```compile_fail,E0080
4033 /// use zerocopy::*;
4034 /// # use zerocopy_derive::*;
4035 ///
4036 /// #[derive(FromBytes, Immutable, KnownLayout)]
4037 /// #[repr(C)]
4038 /// struct ZSTy {
4039 /// leading_sized: u16,
4040 /// trailing_dst: [()],
4041 /// }
4042 ///
/// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
4044 /// ```
4045 ///
4046 /// # Examples
4047 ///
4048 /// ```
4049 /// use zerocopy::FromBytes;
4050 /// # use zerocopy_derive::*;
4051 ///
4052 /// #[derive(FromBytes, KnownLayout, Immutable)]
4053 /// #[repr(C)]
4054 /// struct PacketHeader {
4055 /// src_port: [u8; 2],
4056 /// dst_port: [u8; 2],
4057 /// length: [u8; 2],
4058 /// checksum: [u8; 2],
4059 /// }
4060 ///
4061 /// #[derive(FromBytes, KnownLayout, Immutable)]
4062 /// #[repr(C)]
4063 /// struct Packet {
4064 /// header: PacketHeader,
4065 /// body: [u8],
4066 /// }
4067 ///
4068 /// // These bytes encode a `Packet`.
4069 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
4070 ///
4071 /// let packet = Packet::ref_from_bytes(bytes).unwrap();
4072 ///
4073 /// assert_eq!(packet.header.src_port, [0, 1]);
4074 /// assert_eq!(packet.header.dst_port, [2, 3]);
4075 /// assert_eq!(packet.header.length, [4, 5]);
4076 /// assert_eq!(packet.header.checksum, [6, 7]);
4077 /// assert_eq!(packet.body, [8, 9, 10, 11]);
4078 /// ```
4079 ///
4080 #[doc = codegen_section!(
4081 header = "h5",
4082 bench = "ref_from_bytes",
4083 format = "coco",
4084 arity = 3,
4085 [
4086 open
4087 @index 1
4088 @title "Sized"
4089 @variant "static_size"
4090 ],
4091 [
4092 @index 2
4093 @title "Unsized"
4094 @variant "dynamic_size"
4095 ],
4096 [
4097 @index 3
4098 @title "Dynamically Padded"
4099 @variant "dynamic_padding"
4100 ]
4101 )]
4102 #[must_use = "has no side effects"]
4103 #[cfg_attr(zerocopy_inline_always, inline(always))]
4104 #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Reject, at compile time, unsized types whose dynamically-sized
        // component is zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Attempt to cast the byte-slice pointer to a `Self` pointer covering
        // the entire slice (`no_leftover`), checking size and alignment.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
            // On success, the referent's bytes are initialized, which is a
            // valid `Self` because `Self: FromBytes`; `recall_validity`
            // re-tags the pointer accordingly before converting to `&Self`.
            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
            // On failure, map the error's source pointer back to the
            // original `&[u8]` so the caller gets their slice back.
            Err(err) => Err(err.map_src(|src| src.as_ref())),
        }
    }
4115
4116 /// Interprets the prefix of the given `source` as a `&Self` without
4117 /// copying.
4118 ///
4119 /// This method computes the [largest possible size of `Self`][valid-size]
4120 /// that can fit in the leading bytes of `source`, then attempts to return
4121 /// both a reference to those bytes interpreted as a `Self`, and a reference
4122 /// to the remaining bytes. If there are insufficient bytes, or if `source`
4123 /// is not appropriately aligned, this returns `Err`. If [`Self:
4124 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4125 /// error][size-error-from].
4126 ///
4127 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4128 ///
4129 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4130 /// [self-unaligned]: Unaligned
4131 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4132 /// [slice-dst]: KnownLayout#dynamically-sized-types
4133 ///
4134 /// # Compile-Time Assertions
4135 ///
4136 /// This method cannot yet be used on unsized types whose dynamically-sized
4137 /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
4138 /// support such types. Attempting to use this method on such types results
4139 /// in a compile-time assertion error; e.g.:
4140 ///
4141 /// ```compile_fail,E0080
4142 /// use zerocopy::*;
4143 /// # use zerocopy_derive::*;
4144 ///
4145 /// #[derive(FromBytes, Immutable, KnownLayout)]
4146 /// #[repr(C)]
4147 /// struct ZSTy {
4148 /// leading_sized: u16,
4149 /// trailing_dst: [()],
4150 /// }
4151 ///
/// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
4153 /// ```
4154 ///
4155 /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
4156 ///
4157 /// # Examples
4158 ///
4159 /// ```
4160 /// use zerocopy::FromBytes;
4161 /// # use zerocopy_derive::*;
4162 ///
4163 /// #[derive(FromBytes, KnownLayout, Immutable)]
4164 /// #[repr(C)]
4165 /// struct PacketHeader {
4166 /// src_port: [u8; 2],
4167 /// dst_port: [u8; 2],
4168 /// length: [u8; 2],
4169 /// checksum: [u8; 2],
4170 /// }
4171 ///
4172 /// #[derive(FromBytes, KnownLayout, Immutable)]
4173 /// #[repr(C)]
4174 /// struct Packet {
4175 /// header: PacketHeader,
4176 /// body: [[u8; 2]],
4177 /// }
4178 ///
4179 /// // These are more bytes than are needed to encode a `Packet`.
4180 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
4181 ///
4182 /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
4183 ///
4184 /// assert_eq!(packet.header.src_port, [0, 1]);
4185 /// assert_eq!(packet.header.dst_port, [2, 3]);
4186 /// assert_eq!(packet.header.length, [4, 5]);
4187 /// assert_eq!(packet.header.checksum, [6, 7]);
4188 /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
4189 /// assert_eq!(suffix, &[14u8][..]);
4190 /// ```
4191 ///
4192 #[doc = codegen_section!(
4193 header = "h5",
4194 bench = "ref_from_prefix",
4195 format = "coco",
4196 arity = 3,
4197 [
4198 open
4199 @index 1
4200 @title "Sized"
4201 @variant "static_size"
4202 ],
4203 [
4204 @index 2
4205 @title "Unsized"
4206 @variant "dynamic_size"
4207 ],
4208 [
4209 @index 3
4210 @title "Dynamically Padded"
4211 @variant "dynamic_padding"
4212 ]
4213 )]
4214 #[must_use = "has no side effects"]
4215 #[cfg_attr(zerocopy_inline_always, inline(always))]
4216 #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Reject, at compile time, unsized types whose dynamically-sized
        // component is zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper, taking `Self` from the
        // leading bytes of `source`. Passing `None` leaves the element count
        // unspecified, so the helper computes the largest size of `Self` that
        // fits (per this method's doc comment).
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
4224
    /// Interprets the suffix of the given bytes as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_suffix",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: Immutable + KnownLayout,
    {
        // Zero-sized trailing slice elements are only supported by the
        // `_with_elems` methods; reject them at compile time here.
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(&Self, &[u8])`; `swap` reorders the pair into the
        // documented `(prefix bytes, &Self)` shape.
        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4318
    /// Interprets the given `source` as a `&mut Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_bytes_with_elems`]: FromBytes::mut_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    ///
    /// header.checksum = [0, 0];
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
    ///
    /// ```
    ///
    #[doc = codegen_header!("h5", "mut_from_bytes")]
    ///
    /// See [`FromBytes::ref_from_bytes`](#method.ref_from_bytes.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Zero-sized trailing slice elements are only supported by the
        // `_with_elems` methods; reject them at compile time here.
        static_assert_dst_is_not_zst!(Self);
        // Cast the exclusive byte pointer to `Self`, requiring that `source`
        // has no leftover bytes. On success, `recall_validity` re-derives the
        // pointer's validity invariant for `Self` (any byte pattern is valid
        // for a `FromBytes` type) before reborrowing as `&mut Self`. On
        // failure, convert the error's source pointer back into `&mut [u8]`.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
4405
    /// Interprets the prefix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, &[8, 9][..]);
    ///
    /// header.checksum = [0, 0];
    /// body.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
    /// ```
    ///
    #[doc = codegen_header!("h5", "mut_from_prefix")]
    ///
    /// See [`FromBytes::ref_from_prefix`](#method.ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Zero-sized trailing slice elements are only supported by the
        // `_with_elems` methods; reject them at compile time here.
        static_assert_dst_is_not_zst!(Self);
        // `None` infers the largest `Self` that fits; `CastType::Prefix`
        // selects the leading bytes of `source`.
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4495
    /// Interprets the suffix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    ///
    /// prefix.fill(0);
    /// trailer.frame_check_sequence.fill(1);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
    /// ```
    ///
    #[doc = codegen_header!("h5", "mut_from_suffix")]
    ///
    /// See [`FromBytes::ref_from_suffix`](#method.ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Zero-sized trailing slice elements are only supported by the
        // `_with_elems` methods; reject them at compile time here.
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(&mut Self, &mut [u8])`; `swap` reorders the pair
        // into the documented `(prefix bytes, &mut Self)` shape.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4576
    /// Interprets the given `source` as a `&Self` with a DST length equal to
    /// `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_bytes_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_ref(source);
        // Cast with an explicit trailing-element count; `no_leftover` requires
        // `source` to be exactly the size of `Self` with `count` elements.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `recall_validity` re-derives the validity invariant for `Self`
            // (any byte pattern is valid for a `FromBytes` type).
            Ok(slf) => Ok(slf.recall_validity().as_ref()),
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
4673
    /// Interprets the prefix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_prefix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // `Some(count)` fixes the trailing-element count explicitly, so no ZST
        // assertion is needed; `CastType::Prefix` selects the leading bytes.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4767
    /// Interprets the suffix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_suffix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // The helper yields `(&Self, &[u8])`; `swap` reorders the pair into
        // the documented `(prefix bytes, &Self)` shape.
        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4861
    /// Interprets the given `source` as a `&mut Self` with a DST length equal
    /// to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
    ///
    #[doc = codegen_header!("h5", "mut_from_bytes_with_elems")]
    ///
    /// See [`FromBytes::ref_from_bytes_with_elems`](#method.ref_from_bytes_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_mut(source);
        // Cast with an explicit trailing-element count; `no_leftover` requires
        // `source` to be exactly the size of `Self` with `count` elements.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `recall_validity` re-derives the validity invariant for `Self`
            // (any byte pattern is valid for a `FromBytes` type); the
            // `BecauseExclusive` leg reflects the exclusive (`&mut`) borrow.
            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4947
    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    /// suffix.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
    ///
    #[doc = codegen_header!("h5", "mut_from_prefix_with_elems")]
    ///
    /// See [`FromBytes::ref_from_prefix_with_elems`](#method.ref_from_prefix_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // `Some(count)` fixes the trailing-element count explicitly, so no ZST
        // assertion is needed; `CastType::Prefix` selects the leading bytes.
        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
5032
    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    ///
    /// prefix.fill(9);
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
    ///
    #[doc = codegen_header!("h5", "mut_from_suffix_with_elems")]
    ///
    /// See [`FromBytes::ref_from_suffix_with_elems`](#method.ref_from_suffix_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // The helper yields `(&mut Self, &mut [u8])`; `swap` reorders the pair
        // into the documented `(prefix bytes, &mut Self)` shape.
        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
5117
    /// Reads a copy of `Self` from the given `source`.
    ///
    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_bytes",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Borrow the bytes as an `Unalign<Self>` (which has alignment 1, so
        // only the size can fail to match), then copy the value out.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited here, as witnessed by the
            // empty match, so this arm can never be reached.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5172
5173 /// Reads a copy of `Self` from the prefix of the given `source`.
5174 ///
5175 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
5176 /// of `source`, returning that `Self` and any remaining bytes. If
5177 /// `source.len() < size_of::<Self>()`, it returns `Err`.
5178 ///
5179 /// # Examples
5180 ///
5181 /// ```
5182 /// use zerocopy::FromBytes;
5183 /// # use zerocopy_derive::*;
5184 ///
5185 /// #[derive(FromBytes)]
5186 /// #[repr(C)]
5187 /// struct PacketHeader {
5188 /// src_port: [u8; 2],
5189 /// dst_port: [u8; 2],
5190 /// length: [u8; 2],
5191 /// checksum: [u8; 2],
5192 /// }
5193 ///
5194 /// // These are more bytes than are needed to encode a `PacketHeader`.
5195 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
5196 ///
5197 /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
5198 ///
5199 /// assert_eq!(header.src_port, [0, 1]);
5200 /// assert_eq!(header.dst_port, [2, 3]);
5201 /// assert_eq!(header.length, [4, 5]);
5202 /// assert_eq!(header.checksum, [6, 7]);
5203 /// assert_eq!(body, [8, 9]);
5204 /// ```
5205 ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_prefix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` rather than `Self` sidesteps `source`'s
        // (arbitrary) alignment: `Unalign` is trivially aligned (see the SAFETY
        // comment below), so the only way the cast can fail is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            // Copy the value out of the leading bytes and return the rest.
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited here, as witnessed by the
            // empty match — this arm is statically unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5230
5231 /// Reads a copy of `Self` from the suffix of the given `source`.
5232 ///
5233 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
5234 /// of `source`, returning that `Self` and any preceding bytes. If
5235 /// `source.len() < size_of::<Self>()`, it returns `Err`.
5236 ///
5237 /// # Examples
5238 ///
5239 /// ```
5240 /// use zerocopy::FromBytes;
5241 /// # use zerocopy_derive::*;
5242 ///
5243 /// #[derive(FromBytes)]
5244 /// #[repr(C)]
5245 /// struct PacketTrailer {
5246 /// frame_check_sequence: [u8; 4],
5247 /// }
5248 ///
5249 /// // These are more bytes than are needed to encode a `PacketTrailer`.
5250 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
5251 ///
5252 /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
5253 ///
5254 /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
5255 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
5256 /// ```
5257 ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_suffix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` rather than `Self` sidesteps `source`'s
        // (arbitrary) alignment: `Unalign` is trivially aligned (see the SAFETY
        // comment below), so the only way the cast can fail is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            // Copy the value out of the trailing bytes and return the rest.
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited here, as witnessed by the
            // empty match — this arm is statically unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5282
5283 /// Reads a copy of `self` from an `io::Read`.
5284 ///
    /// This is useful for interfacing with operating system byte sources
    /// (files, sockets, etc.).
5287 ///
5288 /// # Examples
5289 ///
5290 /// ```no_run
5291 /// use zerocopy::{byteorder::big_endian::*, FromBytes};
5292 /// use std::fs::File;
5293 /// # use zerocopy_derive::*;
5294 ///
5295 /// #[derive(FromBytes)]
5296 /// #[repr(C)]
5297 /// struct BitmapFileHeader {
5298 /// signature: [u8; 2],
5299 /// size: U32,
5300 /// reserved: U64,
5301 /// offset: U64,
5302 /// }
5303 ///
5304 /// let mut file = File::open("image.bin").unwrap();
5305 /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
5306 /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        let ptr = ptr.as_bytes();
        // `Read::read_exact` either fills the entire buffer or returns `Err`;
        // `?` propagates the error and the partially-written `buf` is dropped.
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
5338
5339 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
5340 #[doc(hidden)]
5341 #[must_use = "has no side effects"]
5342 #[inline(always)]
5343 fn ref_from(source: &[u8]) -> Option<&Self>
5344 where
5345 Self: KnownLayout + Immutable,
5346 {
5347 Self::ref_from_bytes(source).ok()
5348 }
5349
5350 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
5351 #[doc(hidden)]
5352 #[must_use = "has no side effects"]
5353 #[inline(always)]
5354 fn mut_from(source: &mut [u8]) -> Option<&mut Self>
5355 where
5356 Self: KnownLayout + IntoBytes,
5357 {
5358 Self::mut_from_bytes(source).ok()
5359 }
5360
5361 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
5362 #[doc(hidden)]
5363 #[must_use = "has no side effects"]
5364 #[inline(always)]
5365 fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
5366 where
5367 Self: Sized + Immutable,
5368 {
5369 <[Self]>::ref_from_prefix_with_elems(source, count).ok()
5370 }
5371
5372 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
5373 #[doc(hidden)]
5374 #[must_use = "has no side effects"]
5375 #[inline(always)]
5376 fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
5377 where
5378 Self: Sized + Immutable,
5379 {
5380 <[Self]>::ref_from_suffix_with_elems(source, count).ok()
5381 }
5382
5383 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
5384 #[doc(hidden)]
5385 #[must_use = "has no side effects"]
5386 #[inline(always)]
5387 fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
5388 where
5389 Self: Sized + IntoBytes,
5390 {
5391 <[Self]>::mut_from_prefix_with_elems(source, count).ok()
5392 }
5393
5394 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
5395 #[doc(hidden)]
5396 #[must_use = "has no side effects"]
5397 #[inline(always)]
5398 fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
5399 where
5400 Self: Sized + IntoBytes,
5401 {
5402 <[Self]>::mut_from_suffix_with_elems(source, count).ok()
5403 }
5404
5405 #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
5406 #[doc(hidden)]
5407 #[must_use = "has no side effects"]
5408 #[inline(always)]
5409 fn read_from(source: &[u8]) -> Option<Self>
5410 where
5411 Self: Sized,
5412 {
5413 Self::read_from_bytes(source).ok()
5414 }
5415}
5416
/// Interprets the given affix of the given bytes as a `&T`.
///
/// This function computes the largest possible size of `T` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `T`, and a reference to the excess bytes. If
/// there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // Split `source` into a pointer to the candidate `T` and the remaining
    // prefix/suffix bytes; on failure, map the error's source pointer back into
    // the original `&[u8]` so the caller gets their slice back.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // NOTE(review): `recall_validity` appears to upgrade the pointer's
    // bit-validity invariant; `T: FromBytes` is what justifies treating these
    // initialized bytes as a valid `T` — confirm against `Ptr`'s docs.
    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
}
5435
/// Interprets the given affix of the given bytes as a `&mut T` without
/// copying.
///
/// This function computes the largest possible size of `T` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `T`, and a reference to the excess bytes. If
/// there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // Split `source` into a pointer to the candidate `T` and the remaining
    // prefix/suffix bytes; on failure, map the error's source pointer back into
    // the original `&mut [u8]` so the caller gets their slice back.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // NOTE(review): `recall_validity` appears to upgrade the pointer's
    // bit-validity invariant (the turbofish selects the proof path); `T:
    // FromBytes` justifies treating these initialized bytes as a valid `T`.
    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
}
5455
5456/// Analyzes whether a type is [`IntoBytes`].
5457///
5458/// This derive analyzes, at compile time, whether the annotated type satisfies
5459/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
5460/// sound to do so. This derive can be applied to structs and enums (see below
5461/// for union support); e.g.:
5462///
5463/// ```
5464/// # use zerocopy_derive::{IntoBytes};
5465/// #[derive(IntoBytes)]
5466/// #[repr(C)]
5467/// struct MyStruct {
5468/// # /*
5469/// ...
5470/// # */
5471/// }
5472///
5473/// #[derive(IntoBytes)]
5474/// #[repr(u8)]
5475/// enum MyEnum {
5476/// # Variant,
5477/// # /*
5478/// ...
5479/// # */
5480/// }
5481/// ```
5482///
5483/// [safety conditions]: trait@IntoBytes#safety
5484///
5485/// # Error Messages
5486///
5487/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
5488/// for `IntoBytes` is implemented, you may get an error like this:
5489///
5490/// ```text
5491/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5492/// --> lib.rs:23:10
5493/// |
5494/// 1 | #[derive(IntoBytes)]
5495/// | ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5496/// |
5497/// = help: the following implementations were found:
5498/// <() as PaddingFree<T, false>>
5499/// ```
5500///
5501/// This error indicates that the type being annotated has padding bytes, which
5502/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5503/// fields by using types in the [`byteorder`] module, wrapping field types in
5504/// [`Unalign`], adding explicit struct fields where those padding bytes would
5505/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5506/// layout] for more information about type layout and padding.
5507///
5508/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
5509///
5510/// # Unions
5511///
5512/// Currently, union bit validity is [up in the air][union-validity], and so
5513/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5514/// However, implementing `IntoBytes` on a union type is likely sound on all
5515/// existing Rust toolchains - it's just that it may become unsound in the
5516/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
5517/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5518///
5519/// ```shell
5520/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5521/// ```
5522///
5523/// However, it is your responsibility to ensure that this derive is sound on
5524/// the specific versions of the Rust toolchain you are using! We make no
5525/// stability or soundness guarantees regarding this cfg, and may remove it at
5526/// any point.
5527///
5528/// We are actively working with Rust to stabilize the necessary language
5529/// guarantees to support this in a forwards-compatible way, which will enable
5530/// us to remove the cfg gate. As part of this effort, we need to know how much
5531/// demand there is for this feature. If you would like to use `IntoBytes` on
5532/// unions, [please let us know][discussion].
5533///
5534/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5535/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5536///
5537/// # Analysis
5538///
5539/// *This section describes, roughly, the analysis performed by this derive to
5540/// determine whether it is sound to implement `IntoBytes` for a given type.
5541/// Unless you are modifying the implementation of this derive, or attempting to
5542/// manually implement `IntoBytes` for a type yourself, you don't need to read
5543/// this section.*
5544///
5545/// If a type has the following properties, then this derive can implement
5546/// `IntoBytes` for that type:
5547///
5548/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5549/// - if the type is `repr(transparent)` or `repr(packed)`, it is
5550/// [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5551/// - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5552/// if its field is [`IntoBytes`]; else,
5553/// - if the type has no generic parameters, it is [`IntoBytes`] if the type
5554/// is sized and has no padding bytes; else,
5555/// - if the type is `repr(C)`, its fields must be [`Unaligned`].
5556/// - If the type is an enum:
5557/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5558/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5559/// - It must have no padding bytes.
5560/// - Its fields must be [`IntoBytes`].
5561///
5562/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
5564/// implementation details of this derive.
5565///
5566/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
5567#[cfg(any(feature = "derive", test))]
5568#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5569pub use zerocopy_derive::IntoBytes;
5570
5571/// Types that can be converted to an immutable slice of initialized bytes.
5572///
5573/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
5574/// same size. This is useful for efficiently serializing structured data as raw
5575/// bytes.
5576///
5577/// # Implementation
5578///
5579/// **Do not implement this trait yourself!** Instead, use
5580/// [`#[derive(IntoBytes)]`][derive]; e.g.:
5581///
5582/// ```
5583/// # use zerocopy_derive::IntoBytes;
5584/// #[derive(IntoBytes)]
5585/// #[repr(C)]
5586/// struct MyStruct {
5587/// # /*
5588/// ...
5589/// # */
5590/// }
5591///
5592/// #[derive(IntoBytes)]
5593/// #[repr(u8)]
5594/// enum MyEnum {
5595/// # Variant0,
5596/// # /*
5597/// ...
5598/// # */
5599/// }
5600/// ```
5601///
5602/// This derive performs a sophisticated, compile-time safety analysis to
5603/// determine whether a type is `IntoBytes`. See the [derive
5604/// documentation][derive] for guidance on how to interpret error messages
5605/// produced by the derive's analysis.
5606///
5607/// # Safety
5608///
5609/// *This section describes what is required in order for `T: IntoBytes`, and
5610/// what unsafe code may assume of such types. If you don't plan on implementing
5611/// `IntoBytes` manually, and you don't plan on writing unsafe code that
5612/// operates on `IntoBytes` types, then you don't need to read this section.*
5613///
5614/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
5615/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
5616/// marked as `IntoBytes` which violates this contract, it may cause undefined
5617/// behavior.
5618///
5619/// `#[derive(IntoBytes)]` only permits [types which satisfy these
5620/// requirements][derive-analysis].
5621///
5622#[cfg_attr(
5623 feature = "derive",
5624 doc = "[derive]: zerocopy_derive::IntoBytes",
5625 doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
5626)]
5627#[cfg_attr(
5628 not(feature = "derive"),
5629 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
5630 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
5631)]
5632#[cfg_attr(
5633 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5634 diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
5635)]
pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    //
    // This required method has no provided body, so any `impl IntoBytes` must
    // name it; per its name, only the custom derive is expected to do so.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
5647
5648 /// Gets the bytes of this value.
5649 ///
5650 /// # Examples
5651 ///
5652 /// ```
5653 /// use zerocopy::IntoBytes;
5654 /// # use zerocopy_derive::*;
5655 ///
5656 /// #[derive(IntoBytes, Immutable)]
5657 /// #[repr(C)]
5658 /// struct PacketHeader {
5659 /// src_port: [u8; 2],
5660 /// dst_port: [u8; 2],
5661 /// length: [u8; 2],
5662 /// checksum: [u8; 2],
5663 /// }
5664 ///
5665 /// let header = PacketHeader {
5666 /// src_port: [0, 1],
5667 /// dst_port: [2, 3],
5668 /// length: [4, 5],
5669 /// checksum: [6, 7],
5670 /// };
5671 ///
5672 /// let bytes = header.as_bytes();
5673 ///
5674 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5675 /// ```
5676 ///
5677 #[doc = codegen_section!(
5678 header = "h5",
5679 bench = "as_bytes",
5680 format = "coco",
5681 arity = 2,
5682 [
5683 open
5684 @index 1
5685 @title "Sized"
5686 @variant "static_size"
5687 ],
5688 [
5689 @index 2
5690 @title "Unsized"
5691 @variant "dynamic_size"
5692 ]
5693 )]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_bytes(&self) -> &[u8]
    where
        Self: Immutable,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        // The cast below (`slf.cast::<u8>()`) keeps only the address,
        // discarding any wide-pointer metadata.
        let slf: *const Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
        //   many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - Since `slf` is derived from `self`, and `self` is an immutable
        //   reference, the only other references to this memory region that
        //   could exist are other immutable references, which by `Self:
        //   Immutable` don't permit mutation.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
    }
5727
5728 /// Gets the bytes of this value mutably.
5729 ///
5730 /// # Examples
5731 ///
5732 /// ```
5733 /// use zerocopy::IntoBytes;
5734 /// # use zerocopy_derive::*;
5735 ///
5736 /// # #[derive(Eq, PartialEq, Debug)]
5737 /// #[derive(FromBytes, IntoBytes, Immutable)]
5738 /// #[repr(C)]
5739 /// struct PacketHeader {
5740 /// src_port: [u8; 2],
5741 /// dst_port: [u8; 2],
5742 /// length: [u8; 2],
5743 /// checksum: [u8; 2],
5744 /// }
5745 ///
5746 /// let mut header = PacketHeader {
5747 /// src_port: [0, 1],
5748 /// dst_port: [2, 3],
5749 /// length: [4, 5],
5750 /// checksum: [6, 7],
5751 /// };
5752 ///
5753 /// let bytes = header.as_mut_bytes();
5754 ///
5755 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5756 ///
5757 /// bytes.reverse();
5758 ///
5759 /// assert_eq!(header, PacketHeader {
5760 /// src_port: [7, 6],
5761 /// dst_port: [5, 4],
5762 /// length: [3, 2],
5763 /// checksum: [1, 0],
5764 /// });
5765 /// ```
5766 ///
5767 #[doc = codegen_header!("h5", "as_mut_bytes")]
5768 ///
5769 /// See [`IntoBytes::as_bytes`](#method.as_bytes.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        // The cast below (`slf.cast::<u8>()`) keeps only the address,
        // discarding any wide-pointer metadata.
        let slf: *mut Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
        //   size_of::<u8>()` many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - `Self: FromBytes` ensures that no write to this memory region
        //   could result in it containing an invalid `Self`.
        // - Since `slf` is derived from `self`, and `self` is a mutable
        //   reference, no other references to this memory region can exist.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
    }
5803
5804 /// Writes a copy of `self` to `dst`.
5805 ///
5806 /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5807 ///
5808 /// # Examples
5809 ///
5810 /// ```
5811 /// use zerocopy::IntoBytes;
5812 /// # use zerocopy_derive::*;
5813 ///
5814 /// #[derive(IntoBytes, Immutable)]
5815 /// #[repr(C)]
5816 /// struct PacketHeader {
5817 /// src_port: [u8; 2],
5818 /// dst_port: [u8; 2],
5819 /// length: [u8; 2],
5820 /// checksum: [u8; 2],
5821 /// }
5822 ///
5823 /// let header = PacketHeader {
5824 /// src_port: [0, 1],
5825 /// dst_port: [2, 3],
5826 /// length: [4, 5],
5827 /// checksum: [6, 7],
5828 /// };
5829 ///
5830 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5831 ///
5832 /// header.write_to(&mut bytes[..]);
5833 ///
5834 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5835 /// ```
5836 ///
5837 /// If too many or too few target bytes are provided, `write_to` returns
5838 /// `Err` and leaves the target bytes unmodified:
5839 ///
5840 /// ```
5841 /// # use zerocopy::IntoBytes;
5842 /// # let header = u128::MAX;
5843 /// let mut excessive_bytes = &mut [0u8; 128][..];
5844 ///
5845 /// let write_result = header.write_to(excessive_bytes);
5846 ///
5847 /// assert!(write_result.is_err());
5848 /// assert_eq!(excessive_bytes, [0u8; 128]);
5849 /// ```
5850 ///
5851 #[doc = codegen_section!(
5852 header = "h5",
5853 bench = "write_to",
5854 format = "coco",
5855 arity = 2,
5856 [
5857 open
5858 @index 1
5859 @title "Sized"
5860 @variant "static_size"
5861 ],
5862 [
5863 @index 2
5864 @title "Unsized"
5865 @variant "dynamic_size"
5866 ]
5867 )]
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        if dst.len() == src.len() {
            // SAFETY: Within this branch of the conditional, we have ensured
            // that `dst.len()` is equal to `src.len()`. Neither the size of the
            // source nor the size of the destination change between the above
            // size check and the invocation of `copy_unchecked`.
            unsafe { util::copy_unchecked(src, dst) }
            Ok(())
        } else {
            // Size mismatch: report the error without touching `dst`, as the
            // documentation promises.
            Err(SizeError::new(self))
        }
    }
5888
5889 /// Writes a copy of `self` to the prefix of `dst`.
5890 ///
5891 /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5892 /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5893 ///
5894 /// # Examples
5895 ///
5896 /// ```
5897 /// use zerocopy::IntoBytes;
5898 /// # use zerocopy_derive::*;
5899 ///
5900 /// #[derive(IntoBytes, Immutable)]
5901 /// #[repr(C)]
5902 /// struct PacketHeader {
5903 /// src_port: [u8; 2],
5904 /// dst_port: [u8; 2],
5905 /// length: [u8; 2],
5906 /// checksum: [u8; 2],
5907 /// }
5908 ///
5909 /// let header = PacketHeader {
5910 /// src_port: [0, 1],
5911 /// dst_port: [2, 3],
5912 /// length: [4, 5],
5913 /// checksum: [6, 7],
5914 /// };
5915 ///
5916 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5917 ///
5918 /// header.write_to_prefix(&mut bytes[..]);
5919 ///
5920 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5921 /// ```
5922 ///
5923 /// If insufficient target bytes are provided, `write_to_prefix` returns
5924 /// `Err` and leaves the target bytes unmodified:
5925 ///
5926 /// ```
5927 /// # use zerocopy::IntoBytes;
5928 /// # let header = u128::MAX;
5929 /// let mut insufficient_bytes = &mut [0, 0][..];
5930 ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
5932 ///
5933 /// assert!(write_result.is_err());
5934 /// assert_eq!(insufficient_bytes, [0, 0]);
5935 /// ```
5936 ///
5937 #[doc = codegen_section!(
5938 header = "h5",
5939 bench = "write_to_prefix",
5940 format = "coco",
5941 arity = 2,
5942 [
5943 open
5944 @index 1
5945 @title "Sized"
5946 @variant "static_size"
5947 ],
5948 [
5949 @index 2
5950 @title "Unsized"
5951 @variant "dynamic_size"
5952 ]
5953 )]
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        // `get_mut` returns `None` (rather than panicking) when `dst` is too
        // short, which we surface as a `SizeError` with `dst` unmodified.
        match dst.get_mut(..src.len()) {
            Some(dst) => {
                // SAFETY: Within this branch of the `match`, we have ensured
                // through fallible subslicing that `dst.len()` is equal to
                // `src.len()`. Neither the size of the source nor the size of
                // the destination change between the above subslicing operation
                // and the invocation of `copy_unchecked`.
                unsafe { util::copy_unchecked(src, dst) }
                Ok(())
            }
            None => Err(SizeError::new(self)),
        }
    }
5976
5977 /// Writes a copy of `self` to the suffix of `dst`.
5978 ///
5979 /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5980 /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5981 ///
5982 /// # Examples
5983 ///
5984 /// ```
5985 /// use zerocopy::IntoBytes;
5986 /// # use zerocopy_derive::*;
5987 ///
5988 /// #[derive(IntoBytes, Immutable)]
5989 /// #[repr(C)]
5990 /// struct PacketHeader {
5991 /// src_port: [u8; 2],
5992 /// dst_port: [u8; 2],
5993 /// length: [u8; 2],
5994 /// checksum: [u8; 2],
5995 /// }
5996 ///
5997 /// let header = PacketHeader {
5998 /// src_port: [0, 1],
5999 /// dst_port: [2, 3],
6000 /// length: [4, 5],
6001 /// checksum: [6, 7],
6002 /// };
6003 ///
6004 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
6005 ///
6006 /// header.write_to_suffix(&mut bytes[..]);
6007 ///
6008 /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
6009 ///
6010 /// let mut insufficient_bytes = &mut [0, 0][..];
6011 ///
6012 /// let write_result = header.write_to_suffix(insufficient_bytes);
6013 ///
6014 /// assert!(write_result.is_err());
6015 /// assert_eq!(insufficient_bytes, [0, 0]);
6016 /// ```
6017 ///
6018 /// If insufficient target bytes are provided, `write_to_suffix` returns
6019 /// `Err` and leaves the target bytes unmodified:
6020 ///
6021 /// ```
6022 /// # use zerocopy::IntoBytes;
6023 /// # let header = u128::MAX;
6024 /// let mut insufficient_bytes = &mut [0, 0][..];
6025 ///
6026 /// let write_result = header.write_to_suffix(insufficient_bytes);
6027 ///
6028 /// assert!(write_result.is_err());
6029 /// assert_eq!(insufficient_bytes, [0, 0]);
6030 /// ```
6031 ///
6032 #[doc = codegen_section!(
6033 header = "h5",
6034 bench = "write_to_suffix",
6035 format = "coco",
6036 arity = 2,
6037 [
6038 open
6039 @index 1
6040 @title "Sized"
6041 @variant "static_size"
6042 ],
6043 [
6044 @index 2
6045 @title "Unsized"
6046 @variant "dynamic_size"
6047 ]
6048 )]
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[cfg_attr(zerocopy_inline_always, inline(always))]
    #[cfg_attr(not(zerocopy_inline_always), inline)]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        // `checked_sub` fails exactly when `dst` is shorter than `src`; in that
        // case, report the error with `dst` unmodified.
        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
            start
        } else {
            return Err(SizeError::new(self));
        };
        let dst = if let Some(dst) = dst.get_mut(start..) {
            dst
        } else {
            // get_mut() should never return None here. We return a `SizeError`
            // rather than .unwrap() because in the event the branch is not
            // optimized away, returning a value is generally lighter-weight
            // than panicking.
            return Err(SizeError::new(self));
        };
        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
        // `dst.len()` is equal to `src.len()`. Neither the size of the source
        // nor the size of the destination change between the above subslicing
        // operation and the invocation of `copy_unchecked`.
        unsafe {
            util::copy_unchecked(src, dst);
        }
        Ok(())
    }
6081
6082 /// Writes a copy of `self` to an `io::Write`.
6083 ///
6084 /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
6085 /// for interfacing with operating system byte sinks (files, sockets, etc.).
6086 ///
6087 /// # Examples
6088 ///
6089 /// ```no_run
6090 /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
6091 /// use std::fs::File;
6092 /// # use zerocopy_derive::*;
6093 ///
6094 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
6095 /// #[repr(C, packed)]
6096 /// struct GrayscaleImage {
6097 /// height: U16,
6098 /// width: U16,
6099 /// pixels: [U16],
6100 /// }
6101 ///
6102 /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
6103 /// let mut file = File::create("image.bin").unwrap();
6104 /// image.write_to_io(&mut file).unwrap();
6105 /// ```
6106 ///
6107 /// If the write fails, `write_to_io` returns `Err` and a partial write may
6108 /// have occurred; e.g.:
6109 ///
6110 /// ```
6111 /// # use zerocopy::IntoBytes;
6112 ///
6113 /// let src = u128::MAX;
6114 /// let mut dst = [0u8; 2];
6115 ///
6116 /// let write_result = src.write_to_io(&mut dst[..]);
6117 ///
6118 /// assert!(write_result.is_err());
6119 /// assert_eq!(dst, [255, 255]);
6120 /// ```
6121 #[cfg(feature = "std")]
6122 #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
6123 #[inline(always)]
6124 fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
6125 where
6126 Self: Immutable,
6127 W: io::Write,
6128 {
6129 dst.write_all(self.as_bytes())
6130 }
6131
6132 #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
6133 #[doc(hidden)]
6134 #[inline]
6135 fn as_bytes_mut(&mut self) -> &mut [u8]
6136 where
6137 Self: FromBytes,
6138 {
6139 self.as_mut_bytes()
6140 }
6141}
6142
6143/// Analyzes whether a type is [`Unaligned`].
6144///
6145/// This derive analyzes, at compile time, whether the annotated type satisfies
6146/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
6147/// sound to do so. This derive can be applied to structs, enums, and unions;
6148/// e.g.:
6149///
6150/// ```
6151/// # use zerocopy_derive::Unaligned;
6152/// #[derive(Unaligned)]
6153/// #[repr(C)]
6154/// struct MyStruct {
6155/// # /*
6156/// ...
6157/// # */
6158/// }
6159///
6160/// #[derive(Unaligned)]
6161/// #[repr(u8)]
6162/// enum MyEnum {
6163/// # Variant0,
6164/// # /*
6165/// ...
6166/// # */
6167/// }
6168///
6169/// #[derive(Unaligned)]
6170/// #[repr(packed)]
6171/// union MyUnion {
6172/// # variant: u8,
6173/// # /*
6174/// ...
6175/// # */
6176/// }
6177/// ```
6178///
6179/// # Analysis
6180///
6181/// *This section describes, roughly, the analysis performed by this derive to
6182/// determine whether it is sound to implement `Unaligned` for a given type.
6183/// Unless you are modifying the implementation of this derive, or attempting to
6184/// manually implement `Unaligned` for a type yourself, you don't need to read
6185/// this section.*
6186///
6187/// If a type has the following properties, then this derive can implement
6188/// `Unaligned` for that type:
6189///
6190/// - If the type is a struct or union:
6191/// - If `repr(align(N))` is provided, `N` must equal 1.
6192/// - If the type is `repr(C)` or `repr(transparent)`, all fields must be
6193/// [`Unaligned`].
6194/// - If the type is not `repr(C)` or `repr(transparent)`, it must be
6195/// `repr(packed)` or `repr(packed(1))`.
6196/// - If the type is an enum:
6197/// - If `repr(align(N))` is provided, `N` must equal 1.
6198/// - It must be a field-less enum (meaning that all variants have no fields).
6199/// - It must be `repr(i8)` or `repr(u8)`.
6200///
6201/// [safety conditions]: trait@Unaligned#safety
6202#[cfg(any(feature = "derive", test))]
6203#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6204pub use zerocopy_derive::Unaligned;
6205
6206/// Types with no alignment requirement.
6207///
6208/// If `T: Unaligned`, then `align_of::<T>() == 1`.
6209///
6210/// # Implementation
6211///
6212/// **Do not implement this trait yourself!** Instead, use
6213/// [`#[derive(Unaligned)]`][derive]; e.g.:
6214///
6215/// ```
6216/// # use zerocopy_derive::Unaligned;
6217/// #[derive(Unaligned)]
6218/// #[repr(C)]
6219/// struct MyStruct {
6220/// # /*
6221/// ...
6222/// # */
6223/// }
6224///
6225/// #[derive(Unaligned)]
6226/// #[repr(u8)]
6227/// enum MyEnum {
6228/// # Variant0,
6229/// # /*
6230/// ...
6231/// # */
6232/// }
6233///
6234/// #[derive(Unaligned)]
6235/// #[repr(packed)]
6236/// union MyUnion {
6237/// # variant: u8,
6238/// # /*
6239/// ...
6240/// # */
6241/// }
6242/// ```
6243///
6244/// This derive performs a sophisticated, compile-time safety analysis to
6245/// determine whether a type is `Unaligned`.
6246///
6247/// # Safety
6248///
6249/// *This section describes what is required in order for `T: Unaligned`, and
6250/// what unsafe code may assume of such types. If you don't plan on implementing
6251/// `Unaligned` manually, and you don't plan on writing unsafe code that
6252/// operates on `Unaligned` types, then you don't need to read this section.*
6253///
6254/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
6255/// reference to `T` at any memory location regardless of alignment. If a type
6256/// is marked as `Unaligned` which violates this contract, it may cause
6257/// undefined behavior.
6258///
6259/// `#[derive(Unaligned)]` only permits [types which satisfy these
6260/// requirements][derive-analysis].
6261///
6262#[cfg_attr(
6263 feature = "derive",
6264 doc = "[derive]: zerocopy_derive::Unaligned",
6265 doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
6266)]
6267#[cfg_attr(
6268 not(feature = "derive"),
6269 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
6270 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
6271)]
6272#[cfg_attr(
6273 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
6274 diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
6275)]
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe.
    //
    // Because this required method is hidden from the docs and has no default
    // implementation, implementing `Unaligned` by hand would require naming
    // it explicitly — which is the mechanism that steers users toward
    // `#[derive(Unaligned)]` instead of writing (potentially unsound) manual
    // impls.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
6284
6285/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
6286///
6287/// This derive can be applied to structs and enums implementing both
6288/// [`Immutable`] and [`IntoBytes`]; e.g.:
6289///
6290/// ```
6291/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
6292/// #[derive(ByteEq, Immutable, IntoBytes)]
6293/// #[repr(C)]
6294/// struct MyStruct {
6295/// # /*
6296/// ...
6297/// # */
6298/// }
6299///
6300/// #[derive(ByteEq, Immutable, IntoBytes)]
6301/// #[repr(u8)]
6302/// enum MyEnum {
6303/// # Variant,
6304/// # /*
6305/// ...
6306/// # */
6307/// }
6308/// ```
6309///
6310/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
6311/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
6313/// `self` and `other` to byte slices and compares those slices for equality.
6314/// This may have performance advantages.
6315#[cfg(any(feature = "derive", test))]
6316#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6317pub use zerocopy_derive::ByteEq;
6318/// Derives an optimized [`Hash`] implementation.
6319///
6320/// This derive can be applied to structs and enums implementing both
6321/// [`Immutable`] and [`IntoBytes`]; e.g.:
6322///
6323/// ```
6324/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
6325/// #[derive(ByteHash, Immutable, IntoBytes)]
6326/// #[repr(C)]
6327/// struct MyStruct {
6328/// # /*
6329/// ...
6330/// # */
6331/// }
6332///
6333/// #[derive(ByteHash, Immutable, IntoBytes)]
6334/// #[repr(u8)]
6335/// enum MyEnum {
6336/// # Variant,
6337/// # /*
6338/// ...
6339/// # */
6340/// }
6341/// ```
6342///
6343/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
6344/// individually hashing each field and combining the results. Instead, the
6345/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
6346/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hashes
6347/// it in a single call to [`Hasher::write()`]. This may have performance
6348/// advantages.
6349///
6350/// [`Hash`]: core::hash::Hash
6351/// [`Hash::hash()`]: core::hash::Hash::hash()
6352/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
6353#[cfg(any(feature = "derive", test))]
6354#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6355pub use zerocopy_derive::ByteHash;
6356/// Implements [`SplitAt`].
6357///
6358/// This derive can be applied to structs; e.g.:
6359///
6360/// ```
/// # use zerocopy_derive::{KnownLayout, SplitAt};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// #     tail: [u8],
6368/// }
6369/// ```
6370#[cfg(any(feature = "derive", test))]
6371#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6372pub use zerocopy_derive::SplitAt;
6373
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Appends `additional` zero-initialized items to the end of `v`.
    ///
    /// Deprecated forwarder kept for backwards compatibility; the
    /// functionality now lives on `FromZeros`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        T::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` zero-initialized items into `v` at `position`.
    ///
    /// Deprecated forwarder kept for backwards compatibility; the
    /// functionality now lives on `FromZeros`.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        T::insert_vec_zeroed(v, position, additional)
    }
}
6411
6412#[cfg(feature = "alloc")]
6413#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6414#[doc(hidden)]
6415pub use alloc_support::*;
6416
6417#[cfg(test)]
6418#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
6419mod tests {
6420 use static_assertions::assert_impl_all;
6421
6422 use super::*;
6423 use crate::util::testutil::*;
6424
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` over a single `[u8]` field makes this a slice DST
    // with the same layout as `[u8]`.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
6432
6433 impl Unsized {
6434 fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
6435 // SAFETY: This *probably* sound - since the layouts of `[u8]` and
6436 // `Unsized` are the same, so are the layouts of `&mut [u8]` and
6437 // `&mut Unsized`. [1] Even if it turns out that this isn't actually
6438 // guaranteed by the language spec, we can just change this since
6439 // it's in test code.
6440 //
6441 // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
6442 unsafe { mem::transmute(slc) }
6443 }
6444 }
6445
6446 #[test]
6447 fn test_known_layout() {
6448 // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
6449 // Test that `PhantomData<$ty>` has the same layout as `()` regardless
6450 // of `$ty`.
6451 macro_rules! test {
6452 ($ty:ty, $expect:expr) => {
6453 let expect = $expect;
6454 assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
6455 assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
6456 assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
6457 };
6458 }
6459
6460 let layout =
6461 |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
6462 align: NonZeroUsize::new(align).unwrap(),
6463 size_info: match trailing_slice_elem_size {
6464 None => SizeInfo::Sized { size: offset },
6465 Some(elem_size) => {
6466 SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
6467 }
6468 },
6469 statically_shallow_unpadded,
6470 };
6471
6472 test!((), layout(0, 1, None, false));
6473 test!(u8, layout(1, 1, None, false));
6474 // Use `align_of` because `u64` alignment may be smaller than 8 on some
6475 // platforms.
6476 test!(u64, layout(8, mem::align_of::<u64>(), None, false));
6477 test!(AU64, layout(8, 8, None, false));
6478
6479 test!(Option<&'static ()>, usize::LAYOUT);
6480
6481 test!([()], layout(0, 1, Some(0), true));
6482 test!([u8], layout(0, 1, Some(1), true));
6483 test!(str, layout(0, 1, Some(1), true));
6484 }
6485
    #[cfg(feature = "derive")]
    #[test]
    fn test_known_layout_derive() {
        // In this and other files (`late_compile_pass.rs`,
        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
        // modes of `derive(KnownLayout)` for the following combination of
        // properties:
        //
        // +------------+--------------------------------------+-----------+
        // |            |       trailing field properties      |           |
        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |------------+----------+----------------+----------+-----------|
        // |          N |        N |              N |        N |      KL00 |
        // |          N |        N |              N |        Y |      KL01 |
        // |          N |        N |              Y |        N |      KL02 |
        // |          N |        N |              Y |        Y |      KL03 |
        // |          N |        Y |              N |        N |      KL04 |
        // |          N |        Y |              N |        Y |      KL05 |
        // |          N |        Y |              Y |        N |      KL06 |
        // |          N |        Y |              Y |        Y |      KL07 |
        // |          Y |        N |              N |        N |      KL08 |
        // |          Y |        N |              N |        Y |      KL09 |
        // |          Y |        N |              Y |        N |      KL10 |
        // |          Y |        N |              Y |        Y |      KL11 |
        // |          Y |        Y |              N |        N |      KL12 |
        // |          Y |        Y |              N |        Y |      KL13 |
        // |          Y |        Y |              Y |        N |      KL14 |
        // |          Y |        Y |              Y |        Y |      KL15 |
        // +------------+----------+----------------+----------+-----------+

        // Helper type which does not implement `KnownLayout`; `T` controls
        // its size and alignment.
        struct NotKnownLayout<T = ()> {
            _t: T,
        }

        // Helper with configurable alignment (`ALIGN`) and size (`SIZE`),
        // used to build stand-ins for `u16`/`u32` with guaranteed layouts.
        #[derive(KnownLayout)]
        #[repr(C)]
        struct AlignSize<const ALIGN: usize, const SIZE: usize>
        where
            elain::Align<ALIGN>: elain::Alignment,
        {
            _align: elain::Align<ALIGN>,
            size: [u8; SIZE],
        }

        type AU16 = AlignSize<2, 2>;
        type AU32 = AlignSize<4, 4>;

        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}

        // Shorthand constructors for expected layouts of sized types and
        // slice DSTs respectively.
        let sized_layout = |align, size| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::Sized { size },
            statically_shallow_unpadded: false,
        };

        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
            statically_shallow_unpadded,
        };

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              N |        Y |      KL01 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01>();

        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Align>();

        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Packed>();

        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        assert_impl_all!(KL01PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL01PackedN>();

        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              Y |        Y |      KL03 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL03(NotKnownLayout, u8);

        let expected = DstLayout::for_type::<KL03>();

        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));

        // ... with `align(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL03Align(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Align>();

        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ... with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL03Packed(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Packed>();

        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));

        // ... with `packed(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL03PackedN(NotKnownLayout<AU32>, u8);

        assert_impl_all!(KL03PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL03PackedN>();

        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              N |        Y |      KL05 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL05<T>(u8, T);

        // Compile-time check only: the derive succeeds for a generic,
        // `Sized` trailing field without a `KnownLayout` bound.
        fn _test_kl05<T>(t: T) -> impl KnownLayout {
            KL05(0u8, t)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              Y |        Y |      KL07 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL07<T: KnownLayout>(u8, T);

        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL07(0u8, t);
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        N |      KL10 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL10(NotKnownLayout<AU32>, [u8]);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL10Align(NotKnownLayout<AU32>, [u8]);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL10Packed(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        Y |      KL11 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL11(NotKnownLayout<AU64>, u8);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL11Align(NotKnownLayout<AU64>, u8);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL11Packed(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL11PackedN(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        N |      KL14 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL14<T: ?Sized + KnownLayout>(u8, T);

        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
            _assert_kl(kl)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        Y |      KL15 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL15<T: KnownLayout>(u8, T);

        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL15(0u8, t);
        }

        // Test a variety of combinations of field types:
        //  - ()
        //  - u8
        //  - AU16
        //  - [()]
        //  - [u8]
        //  - [AU16]

        #[allow(clippy::upper_case_acronyms, dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLTU<T, U: ?Sized>(T, U);

        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));

        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));

        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));

        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));

        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));

        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));

        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));

        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));

        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        // Test a variety of field counts.

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF0;

        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF1([u8]);

        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF2(NotKnownLayout<u8>, [u8]);

        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);

        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);

        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
    }
6888
6889 #[test]
6890 fn test_object_safety() {
6891 fn _takes_immutable(_: &dyn Immutable) {}
6892 fn _takes_unaligned(_: &dyn Unaligned) {}
6893 }
6894
6895 #[test]
6896 fn test_from_zeros_only() {
6897 // Test types that implement `FromZeros` but not `FromBytes`.
6898
6899 assert!(!bool::new_zeroed());
6900 assert_eq!(char::new_zeroed(), '\0');
6901
6902 #[cfg(feature = "alloc")]
6903 {
6904 assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6905 assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6906
6907 assert_eq!(
6908 <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6909 [false, false, false]
6910 );
6911 assert_eq!(
6912 <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6913 ['\0', '\0', '\0']
6914 );
6915
6916 assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6917 assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6918 }
6919
6920 let mut string = "hello".to_string();
6921 let s: &mut str = string.as_mut();
6922 assert_eq!(s, "hello");
6923 s.zero();
6924 assert_eq!(s, "\0\0\0\0\0");
6925 }
6926
    #[test]
    fn test_zst_count_preserved() {
        // Test that, when an explicit count is provided for a type with a
        // ZST trailing slice element, that count is preserved. This is
        // important since, for such types, all element counts result in objects
        // of the same size, and so the correct behavior is ambiguous. However,
        // preserving the count as requested by the user is the behavior that we
        // document publicly.

        // FromZeros methods
        #[cfg(feature = "alloc")]
        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
        #[cfg(feature = "alloc")]
        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);

        // FromBytes methods. Note that every call passes an empty byte
        // buffer: since the element is a ZST, 3 elements occupy zero bytes.
        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
    }
6950
6951 #[test]
6952 fn test_read_write() {
6953 const VAL: u64 = 0x12345678;
6954 #[cfg(target_endian = "big")]
6955 const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6956 #[cfg(target_endian = "little")]
6957 const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6958 const ZEROS: [u8; 8] = [0u8; 8];
6959
6960 // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6961
6962 assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6963 // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6964 // zeros.
6965 let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6966 assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6967 assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6968 // The first 8 bytes are all zeros and the second 8 bytes are from
6969 // `VAL_BYTES`
6970 let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6971 assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6972 assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6973
6974 // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6975
6976 let mut bytes = [0u8; 8];
6977 assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6978 assert_eq!(bytes, VAL_BYTES);
6979 let mut bytes = [0u8; 16];
6980 assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6981 let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6982 assert_eq!(bytes, want);
6983 let mut bytes = [0u8; 16];
6984 assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6985 let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6986 assert_eq!(bytes, want);
6987 }
6988
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            // If `read_from_io` were to hand this implementation a buffer
            // containing uninitialized bytes (e.g. `WithPadding`'s padding
            // byte), the read below would be UB, which tools like Miri can
            // detect.
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
7018
7019 #[test]
7020 #[cfg(feature = "std")]
7021 fn test_read_write_io() {
7022 let mut long_buffer = [0, 0, 0, 0];
7023 assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
7024 assert_eq!(long_buffer, [255, 255, 0, 0]);
7025 assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
7026
7027 let mut short_buffer = [0, 0];
7028 assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
7029 assert_eq!(short_buffer, [255, 255]);
7030 assert!(u32::read_from_io(&short_buffer[..]).is_err());
7031 }
7032
7033 #[test]
7034 fn test_try_from_bytes_try_read_from() {
7035 assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
7036 assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
7037
7038 assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
7039 assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
7040
7041 assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
7042 assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
7043
7044 // If we don't pass enough bytes, it fails.
7045 assert!(matches!(
7046 <u8 as TryFromBytes>::try_read_from_bytes(&[]),
7047 Err(TryReadError::Size(_))
7048 ));
7049 assert!(matches!(
7050 <u8 as TryFromBytes>::try_read_from_prefix(&[]),
7051 Err(TryReadError::Size(_))
7052 ));
7053 assert!(matches!(
7054 <u8 as TryFromBytes>::try_read_from_suffix(&[]),
7055 Err(TryReadError::Size(_))
7056 ));
7057
7058 // If we pass too many bytes, it fails.
7059 assert!(matches!(
7060 <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
7061 Err(TryReadError::Size(_))
7062 ));
7063
7064 // If we pass an invalid value, it fails.
7065 assert!(matches!(
7066 <bool as TryFromBytes>::try_read_from_bytes(&[2]),
7067 Err(TryReadError::Validity(_))
7068 ));
7069 assert!(matches!(
7070 <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
7071 Err(TryReadError::Validity(_))
7072 ));
7073 assert!(matches!(
7074 <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
7075 Err(TryReadError::Validity(_))
7076 ));
7077
7078 // Reading from a misaligned buffer should still succeed. Since `AU64`'s
7079 // alignment is 8, and since we read from two adjacent addresses one
7080 // byte apart, it is guaranteed that at least one of them (though
7081 // possibly both) will be misaligned.
7082 let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
7083 assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
7084 assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
7085
7086 assert_eq!(
7087 <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
7088 Ok((AU64(0), &[][..]))
7089 );
7090 assert_eq!(
7091 <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
7092 Ok((AU64(0), &[][..]))
7093 );
7094
7095 assert_eq!(
7096 <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
7097 Ok((&[][..], AU64(0)))
7098 );
7099 assert_eq!(
7100 <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
7101 Ok((&[][..], AU64(0)))
7102 );
7103 }
7104
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: the steps below are order-dependent — every mutation made
        // through a returned reference is observed by the assertions that
        // follow it.

        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        // Read the (8-aligned) second half as a native-endian `AU64`.
        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 (endian-independent: every byte of
        // the written value is the same).
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8:9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // Overwrite bytes 8..16 (the 8-byte suffix of `buf.t[1..]`) with 0x02.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // Take the trailing 10 bytes and set byte 6 of the full buffer to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Set byte 1 to 30 through a 2-byte mutable prefix, then check the
        // cumulative final state of the whole buffer.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
7139
7140 #[test]
7141 fn test_ref_from_mut_from_bytes_error() {
7142 // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
7143 // error cases.
7144
7145 // Fail because the buffer is too large.
7146 let mut buf = Align::<[u8; 16], AU64>::default();
7147 // `buf.t` should be aligned to 8, so only the length check should fail.
7148 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
7149 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
7150 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
7151 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
7152
7153 // Fail because the buffer is too small.
7154 let mut buf = Align::<[u8; 4], AU64>::default();
7155 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
7156 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
7157 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
7158 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
7159 assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
7160 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
7161 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
7162 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
7163 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
7164 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
7165 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
7166 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
7167
7168 // Fail because the alignment is insufficient.
7169 let mut buf = Align::<[u8; 13], AU64>::default();
7170 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
7171 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
7172 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
7173 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
7174 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
7175 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
7176 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
7177 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
7178 }
7179
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the byte back so the remaining checks observe the original
            // value of `t`.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        // Expected encoding of `Foo { a: 1, b: Wrapping(2), c: None }`; the
        // trailing four zero bytes are the `None` encoding of
        // `Option<NonZeroU32>`.
        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 of the encoding flips a different byte of the
        // `u32` value `a` depending on endianness.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the `?Sized` code paths via a slice-based DST.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
7273
7274 #[test]
7275 fn test_array() {
7276 #[derive(FromBytes, IntoBytes, Immutable)]
7277 #[repr(C)]
7278 struct Foo {
7279 a: [u16; 33],
7280 }
7281
7282 let foo = Foo { a: [0xFFFF; 33] };
7283 let expected = [0xFFu8; 66];
7284 assert_eq!(foo.as_bytes(), &expected[..]);
7285 }
7286
7287 #[test]
7288 fn test_new_zeroed() {
7289 assert!(!bool::new_zeroed());
7290 assert_eq!(u64::new_zeroed(), 0);
7291 // This test exists in order to exercise unsafe code, especially when
7292 // running under Miri.
7293 #[allow(clippy::unit_cmp)]
7294 {
7295 assert_eq!(<()>::new_zeroed(), ());
7296 }
7297 }
7298
7299 #[test]
7300 fn test_transparent_packed_generic_struct() {
7301 #[derive(IntoBytes, FromBytes, Unaligned)]
7302 #[repr(transparent)]
7303 #[allow(dead_code)] // We never construct this type
7304 struct Foo<T> {
7305 _t: T,
7306 _phantom: PhantomData<()>,
7307 }
7308
7309 assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
7310 assert_impl_all!(Foo<u8>: Unaligned);
7311
7312 #[derive(IntoBytes, FromBytes, Unaligned)]
7313 #[repr(C, packed)]
7314 #[allow(dead_code)] // We never construct this type
7315 struct Bar<T, U> {
7316 _t: T,
7317 _u: U,
7318 }
7319
7320 assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
7321 }
7322
    // Tests for the `alloc`-gated APIs: zero-extending and zero-inserting
    // into `Vec`s, and zeroed `Box` construction.
    //
    // NOTE(review): the explicit `drop(v)` calls and the zero-sized-type
    // variants exist to exercise the `unsafe` allocation/deallocation paths,
    // especially when running under Miri; keep them even though they look
    // redundant.
    #[cfg(feature = "alloc")]
    mod alloc {
        use super::*;

        // The cfg presumably gates out old toolchains (the name references
        // `Vec::try_reserve` and Rust 1.57.0) — TODO confirm against the
        // build script that sets it.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed() {
            // Test extending when there is an existing allocation.
            let mut v = vec![100u16, 200, 300];
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
            drop(v);

            // Test extending when there is no existing allocation.
            let mut v: Vec<u64> = Vec::new();
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 3);
            assert_eq!(&*v, &[0, 0, 0]);
            drop(v);
        }

        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed_zst() {
            // Test extending when there is an existing (fake) allocation.
            let mut v = vec![(), (), ()];
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[(), (), (), (), (), ()]);
            drop(v);

            // Test extending when there is no existing (fake) allocation.
            let mut v: Vec<()> = Vec::new();
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(&*v, &[(), (), ()]);
            drop(v);
        }

        // `insert_vec_zeroed` splices `n` zeroed elements at an arbitrary
        // position; cover start/middle/end and the empty-vec case.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed() {
            // Insert at start (no existing allocation).
            let mut v: Vec<u64> = Vec::new();
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[0, 0]);
            drop(v);

            // Insert at start.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
            drop(v);

            // Insert at middle.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 0, 200, 300]);
            drop(v);

            // Insert at end.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 200, 300, 0]);
            drop(v);
        }

        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed_zst() {
            // Insert at start (no existing fake allocation).
            let mut v: Vec<()> = Vec::new();
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[(), ()]);
            drop(v);

            // Insert at start.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[(), (), (), (), ()]);
            drop(v);

            // Insert at middle.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);

            // Insert at end.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);
        }

        #[test]
        fn test_new_box_zeroed() {
            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
        }

        // Allocate a large-ish zeroed array on the heap; we only care that
        // allocation and deallocation succeed.
        #[test]
        fn test_new_box_zeroed_array() {
            drop(<[u32; 0x1000]>::new_box_zeroed());
        }

        #[test]
        fn test_new_box_zeroed_zst() {
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
            }
        }

        #[test]
        fn test_new_box_zeroed_with_elems() {
            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert_eq!(&*s, &[0, 0, 0]);
            // The boxed slice is mutable like any other.
            s[1] = 3;
            assert_eq!(&*s, &[0, 3, 0]);
        }

        #[test]
        fn test_new_box_zeroed_with_elems_empty() {
            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst() {
            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            // Out-of-bounds access on a ZST slice still fails normally.
            assert!(s.get(10).is_none());
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(s[1], ());
            }
            s[2] = ();
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst_empty() {
            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        // Element counts whose byte size overflows `usize` or exceeds
        // `isize::MAX` must fail with `AllocError` rather than allocating.
        #[test]
        fn new_box_zeroed_with_elems_errors() {
            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));

            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
            assert_eq!(
                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
                Err(AllocError)
            );
        }
    }
7492
7493 #[test]
7494 #[allow(deprecated)]
7495 fn test_deprecated_from_bytes() {
7496 let val = 0u32;
7497 let bytes = val.as_bytes();
7498
7499 assert!(u32::ref_from(bytes).is_some());
7500 // mut_from needs mut bytes
7501 let mut val = 0u32;
7502 let mut_bytes = val.as_mut_bytes();
7503 assert!(u32::mut_from(mut_bytes).is_some());
7504
7505 assert!(u32::read_from(bytes).is_some());
7506
7507 let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
7508 assert!(slc.is_empty());
7509 assert_eq!(rest.len(), 4);
7510
7511 let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
7512 assert!(slc.is_empty());
7513 assert_eq!(rest.len(), 4);
7514
7515 let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
7516 assert!(slc.is_empty());
7517 assert_eq!(rest.len(), 4);
7518
7519 let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
7520 assert!(slc.is_empty());
7521 assert_eq!(rest.len(), 4);
7522 }
7523
7524 #[test]
7525 fn test_try_ref_from_prefix_suffix() {
7526 use crate::util::testutil::Align;
7527 let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
7528 let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
7529 assert_eq!(*r, 0);
7530 assert_eq!(rest.len(), 0);
7531
7532 let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
7533 assert_eq!(*r, 0);
7534 assert_eq!(rest.len(), 0);
7535 }
7536
7537 #[test]
7538 fn test_raw_dangling() {
7539 use crate::util::AsAddress;
7540 let ptr: NonNull<u32> = u32::raw_dangling();
7541 assert_eq!(AsAddress::addr(ptr), 1);
7542
7543 let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
7544 assert_eq!(AsAddress::addr(ptr), 1);
7545 }
7546
7547 #[test]
7548 fn test_try_ref_from_prefix_with_elems() {
7549 use crate::util::testutil::Align;
7550 let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7551 let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
7552 assert_eq!(r.len(), 2);
7553 assert_eq!(rest.len(), 0);
7554 }
7555
7556 #[test]
7557 fn test_try_ref_from_suffix_with_elems() {
7558 use crate::util::testutil::Align;
7559 let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7560 let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
7561 assert_eq!(r.len(), 2);
7562 assert_eq!(rest.len(), 0);
7563 }
7564}