From ee9d147e11dd695ae4029f967a18424967a4d204 Mon Sep 17 00:00:00 2001
From: John Vandenberg
Date: Sun, 8 Sep 2024 14:28:26 +0800
Subject: [PATCH] Reduce line lengths

---
 .editorconfig                             | 10 ++++
 CHANGELOG.md                              | 20 +++++--
 docs/src/concepts.md                      |  4 +-
 docs/src/concepts/futures.md              | 64 ++++++++++++++++++-----
 docs/src/concepts/tasks.md                | 21 ++++++--
 docs/src/overview/async-std.md            | 10 +++-
 docs/src/overview/stability-guarantees.md |  4 +-
 docs/src/patterns.md                      |  2 +-
 docs/src/security/policy.md               | 14 +++--
 src/os/windows/io.rs                      |  9 ++--
 src/rt/mod.rs                             |  8 ++-
 src/stream/stream/mod.rs                  |  3 +-
 src/task/current.rs                       |  2 +-
 13 files changed, 133 insertions(+), 38 deletions(-)
 create mode 100644 .editorconfig

diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000..41172b7ea
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,10 @@
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+max_line_length = 120
+
+[*.md]
+max_line_length = 300
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 79b290bf1..53a786c64 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -130,11 +130,21 @@ change.
 
 # [1.8.0] - 2020-12-04
 
-This patch introduces `async_std::channel`, a new submodule for our async channels implementation. `channels` have been one of async-std's most requested features, and have existed as "unstable" for the past year. We've been cautious about stabilizing channels, and this caution turned out to be warranted: we realized our channels could hang indefinitely under certain circumstances, and people ended up expressing a need for unbounded channels.
-
-So today we're introducing the new `async_std::channel` submodule which exports the `async-channel` crate, and we're marking the older unstable `async_std::sync::channel` API as "deprecated". This release includes both APIs, but we intend to stabilize `async_std::channel` and remove the older API in January. This should give dependent projects a month to upgrade, though we can extend that if it proves to be too short.
-
-The rationale for adding a new top-level `channel` submodule, rather than extending `sync` is that the `std::sync` and `async_std::sync` submodule are a bit of a mess, and the libs team [has been talking about splitting `std::sync` up]([https://github.com/rust-lang/rfcs/pull/2788#discussion_r339092478](https://github.com/rust-lang/rfcs/pull/2788#discussion_r339092478)) into separate modules. The stdlib has to guarantee it'll forever be backwards compatible, but `async-std` does not (we fully expect a 2.0 once we have async closures & traits). So we're experimenting with this change before `std` does, with the expectation that this change can serve as a data point when the libs team decides how to proceed in std.
+This patch introduces `async_std::channel`, a new submodule for our async channels implementation. `channels` have been
+one of async-std's most requested features, and have existed as "unstable" for the past year. We've been cautious about
+stabilizing channels, and this caution turned out to be warranted: we realized our channels could hang indefinitely
+under certain circumstances, and people ended up expressing a need for unbounded channels.
+
+So today we're introducing the new `async_std::channel` submodule which exports the `async-channel` crate, and we're
+marking the older unstable `async_std::sync::channel` API as "deprecated". This release includes both APIs, but we
+intend to stabilize `async_std::channel` and remove the older API in January.
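
As a rough sketch of what the new API looks like in use (the capacity and message here are invented for illustration; `bounded` and `unbounded` are the constructors re-exported from `async-channel`):

```rust
use async_std::channel;
use async_std::task;

fn main() {
    task::block_on(async {
        // A bounded channel with capacity 1; `unbounded()` is the other constructor.
        let (sender, receiver) = channel::bounded(1);

        task::spawn(async move {
            // `send` is async and only errors once the channel is closed.
            sender.send("hello").await.unwrap();
        });

        // `recv` resolves when a message arrives, or errors when all senders are gone.
        assert_eq!(receiver.recv().await.unwrap(), "hello");
    });
}
```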
This should give dependent projects a +month to upgrade, though we can extend that if it proves to be too short. + +The rationale for adding a new top-level `channel` submodule, rather than extending `sync` is that the `std::sync` and +`async_std::sync` submodule are a bit of a mess, and the libs team [has been talking about splitting `std::sync` up]([https://github.com/rust-lang/rfcs/pull/2788#discussion_r339092478](https://github.com/rust-lang/rfcs/pull/2788#discussion_r339092478)) +into separate modules. The stdlib has to guarantee it'll forever be backwards compatible, but `async-std` does not +(we fully expect a 2.0 once we have async closures & traits). So we're experimenting with this change before `std` +does, with the expectation that this change can serve as a data point when the libs team decides how to proceed in std. ### Added diff --git a/docs/src/concepts.md b/docs/src/concepts.md index 8e25cb12f..82271037f 100644 --- a/docs/src/concepts.md +++ b/docs/src/concepts.md @@ -4,7 +4,9 @@ However, there are good reasons for that perception. Futures have three concepts at their base that seem to be a constant source of confusion: deferred computation, asynchronicity and independence of execution strategy. -These concepts are not hard, but something many people are not used to. This base confusion is amplified by many implementations oriented on details. Most explanations of these implementations also target advanced users, and can be hard for beginners. We try to provide both easy-to-understand primitives and approachable overviews of the concepts. +These concepts are not hard, but something many people are not used to. This base confusion is amplified by many +implementations oriented on details. Most explanations of these implementations also target advanced users, and can +be hard for beginners. We try to provide both easy-to-understand primitives and approachable overviews of the concepts. Futures are a concept that abstracts over how code is run. By themselves, they do nothing. This is a weird concept in an imperative language, where usually one thing happens after the other - right now. diff --git a/docs/src/concepts/futures.md b/docs/src/concepts/futures.md index 7d9cc6360..2dc314de6 100644 --- a/docs/src/concepts/futures.md +++ b/docs/src/concepts/futures.md @@ -1,26 +1,50 @@ # Futures -A notable point about Rust is [*fearless concurrency*](https://blog.rust-lang.org/2015/04/10/Fearless-Concurrency.html). That is the notion that you should be empowered to do concurrent things, without giving up safety. Also, Rust being a low-level language, it's about fearless concurrency *without picking a specific implementation strategy*. This means we *must* abstract over the strategy, to allow choice *later*, if we want to have any way to share code between users of different strategies. +A notable point about Rust is [*fearless concurrency*](https://blog.rust-lang.org/2015/04/10/Fearless-Concurrency.html). +That is the notion that you should be empowered to do concurrent things, without giving up safety. Also, Rust being a +low-level language, it's about fearless concurrency *without picking a specific implementation strategy*. This means we +*must* abstract over the strategy, to allow choice *later*, if we want to have any way to share code between users of +different strategies. -Futures abstract over *computation*. They describe the "what", independent of the "where" and the "when". 
For that, they aim to break code into small, composable actions that can then be executed by a part of our system. Let's take a tour through what it means to compute things to find where we can abstract. +Futures abstract over *computation*. They describe the "what", independent of the "where" and the "when". For that, +they aim to break code into small, composable actions that can then be executed by a part of our system. Let's take a +tour through what it means to compute things to find where we can abstract. ## Send and Sync -Luckily, concurrent Rust already has two well-known and effective concepts abstracting over sharing between concurrent parts of a program: `Send` and `Sync`. Notably, both the `Send` and `Sync` traits abstract over *strategies* of concurrent work, compose neatly, and don't prescribe an implementation. +Luckily, concurrent Rust already has two well-known and effective concepts abstracting over sharing between concurrent +parts of a program: `Send` and `Sync`. Notably, both the `Send` and `Sync` traits abstract over *strategies* of +concurrent work, compose neatly, and don't prescribe an implementation. As a quick summary: -- `Send` abstracts over *passing data* in a computation to another concurrent computation (let's call it the receiver), losing access to it on the sender side. In many programming languages, this strategy is commonly implemented, but missing support from the language side, and expects you to enforce the "losing access" behaviour yourself. This is a regular source of bugs: senders keeping handles to sent things around and maybe even working with them after sending. Rust mitigates this problem by making this behaviour known. Types can be `Send` or not (by implementing the appropriate marker trait), allowing or disallowing sending them around, and the ownership and borrowing rules prevent subsequent access. - -- `Sync` is about *sharing data* between two concurrent parts of a program. This is another common pattern: as writing to a memory location or reading while another party is writing is inherently unsafe, this access needs to be moderated through synchronisation.[^1] There are many common ways for two parties to agree on not using the same part in memory at the same time, for example mutexes and spinlocks. Again, Rust gives you the option of (safely!) not caring. Rust gives you the ability to express that something *needs* synchronisation while not being specific about the *how*. - -Note how we avoided any word like *"thread"*, but instead opted for "computation". The full power of `Send` and `Sync` is that they relieve you of the burden of knowing *what* shares. At the point of implementation, you only need to know which method of sharing is appropriate for the type at hand. This keeps reasoning local and is not influenced by whatever implementation the user of that type later uses. +- `Send` abstracts over *passing data* in a computation to another concurrent computation (let's call it the receiver), + losing access to it on the sender side. In many programming languages, this strategy is commonly implemented, but + missing support from the language side, and expects you to enforce the "losing access" behaviour yourself. + This is a regular source of bugs: senders keeping handles to sent things around and maybe even working with them + after sending. Rust mitigates this problem by making this behaviour known. 
Types can be `Send` or not + (by implementing the appropriate marker trait), allowing or disallowing sending them around, and the ownership and + borrowing rules prevent subsequent access. + +- `Sync` is about *sharing data* between two concurrent parts of a program. This is another common pattern: as writing + to a memory location or reading while another party is writing is inherently unsafe, this access needs to be + moderated through synchronisation.[^1] There are many common ways for two parties to agree on not using the same part + in memory at the same time, for example mutexes and spinlocks. Again, Rust gives you the option of (safely!) not + caring. Rust gives you the ability to express that something *needs* synchronisation while not being specific about + the *how*. + +Note how we avoided any word like *"thread"*, but instead opted for "computation". The full power of `Send` and `Sync` +is that they relieve you of the burden of knowing *what* shares. At the point of implementation, you only need to know +which method of sharing is appropriate for the type at hand. This keeps reasoning local and is not influenced by +whatever implementation the user of that type later uses. `Send` and `Sync` can be composed in interesting fashions, but that's beyond the scope here. You can find examples in the [Rust Book][rust-book-sync]. [rust-book-sync]: https://doc.rust-lang.org/stable/book/ch16-04-extensible-concurrency-sync-and-send.html -To sum up: Rust gives us the ability to safely abstract over important properties of concurrent programs, their data sharing. It does so in a very lightweight fashion; the language itself only knows about the two markers `Send` and `Sync` and helps us a little by deriving them itself, when possible. The rest is a library concern. +To sum up: Rust gives us the ability to safely abstract over important properties of concurrent programs, their data +sharing. It does so in a very lightweight fashion; the language itself only knows about the two markers `Send` and +`Sync` and helps us a little by deriving them itself, when possible. The rest is a library concern. ## An easy view of computation @@ -28,7 +52,9 @@ While computation is a subject to write a whole [book](https://computationbook.c ## Deferring computation -As mentioned above, `Send` and `Sync` are about data. But programs are not only about data, they also talk about *computing* the data. And that's what [`Futures`][futures] do. We are going to have a close look at how that works in the next chapter. Let's look at what Futures allow us to express, in English. Futures go from this plan: +As mentioned above, `Send` and `Sync` are about data. But programs are not only about data, they also talk about *computing* +the data. And that's what [`Futures`][futures] do. We are going to have a close look at how that works in the next chapter. +Let's look at what Futures allow us to express, in English. Futures go from this plan: - Do X - If X succeeded, do Y @@ -73,7 +99,9 @@ fn read_file(path: &str) -> io::Result { } ``` -Speaking in terms of time, we can only take action *before* calling the function or *after* the function returned. This is not desirable, as it takes from us the ability to do something *while* it runs. When working with parallel code, this would take from us the ability to start a parallel task while the first runs (because we gave away control). +Speaking in terms of time, we can only take action *before* calling the function or *after* the function returned. 
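
As a concrete sketch of that limitation (the file names are invented), two blocking reads can only ever run back to back; nothing else can happen on this thread until each call returns:

```rust
use std::fs;
use std::io;

fn main() -> io::Result<()> {
    // Each call blocks the calling thread until it has fully completed.
    let config = fs::read_to_string("config.toml")?;
    // This line is only reached *after* the first read has returned;
    // there is no point at which we could interleave other work.
    let data = fs::read_to_string("data.csv")?;
    println!("{} + {} bytes read", config.len(), data.len());
    Ok(())
}
```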
+This is not desirable, as it takes from us the ability to do something *while* it runs. When working with parallel +code, this would take from us the ability to start a parallel task while the first runs (because we gave away control). This is the moment where we could reach for [threads](https://en.wikipedia.org/wiki/Thread_). But threads are a very specific concurrency primitive and we said that we are searching for an abstraction. @@ -124,9 +152,17 @@ This `async` function sets up a deferred computation. When this function is call ## What does `.await` do? -The `.await` postfix does exactly what it says on the tin: the moment you use it, the code will wait until the requested action (e.g. opening a file or reading all data in it) is finished. The `.await?` is not special, it's just the application of the `?` operator to the result of `.await`. So, what is gained over the initial code example? We're getting futures and then immediately waiting for them? - -The `.await` points act as a marker. Here, the code will wait for a `Future` to produce its value. How will a future finish? You don't need to care! The marker allows the component (usually called the “runtime”) in charge of *executing* this piece of code to take care of all the other things it has to do while the computation finishes. It will come back to this point when the operation you are doing in the background is done. This is why this style of programming is also called *evented programming*. We are waiting for *things to happen* (e.g. a file to be opened) and then react (by starting to read). +The `.await` postfix does exactly what it says on the tin: the moment you use it, the code will wait until the +requested action (e.g. opening a file or reading all data in it) is finished. The `.await?` is not special, it's just +the application of the `?` operator to the result of `.await`. So, what is gained over the initial code example? We're +getting futures and then immediately waiting for them? + +The `.await` points act as a marker. Here, the code will wait for a `Future` to produce its value. How will a future +finish? You don't need to care! The marker allows the component (usually called the “runtime”) in charge of *executing* +this piece of code to take care of all the other things it has to do while the computation finishes. It will come back +to this point when the operation you are doing in the background is done. This is why this style of programming is also +called *evented programming*. We are waiting for *things to happen* (e.g. a file to be opened) and then react +(by starting to read). When executing 2 or more of these functions at the same time, our runtime system is then able to fill the wait time with handling *all the other events* currently going on. diff --git a/docs/src/concepts/tasks.md b/docs/src/concepts/tasks.md index c3dbbe202..d5d3c2dae 100644 --- a/docs/src/concepts/tasks.md +++ b/docs/src/concepts/tasks.md @@ -61,11 +61,20 @@ But let's get to the interesting part: task::spawn(async { }); ``` -`spawn` takes a `Future` and starts running it on a `Task`. It returns a `JoinHandle`. Futures in Rust are sometimes called *cold* Futures. You need something that starts running them. To run a Future, there may be some additional bookkeeping required, e.g. whether it's running or finished, where it is being placed in memory and what the current state is. This bookkeeping part is abstracted away in a `Task`. +`spawn` takes a `Future` and starts running it on a `Task`. It returns a `JoinHandle`. 
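
A minimal sketch of that flow (the task body is invented for illustration):

```rust
use async_std::task;

fn main() {
    task::block_on(async {
        // `spawn` starts the future on a task right away and hands back a `JoinHandle`.
        let handle = task::spawn(async {
            // Imagine some real async work here.
            1 + 2
        });

        // The `JoinHandle` is itself a future; awaiting it yields the task's result.
        let sum = handle.await;
        assert_eq!(sum, 3);
    });
}
```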
Futures in Rust are sometimes +called *cold* Futures. You need something that starts running them. To run a Future, there may be some additional +bookkeeping required, e.g. whether it's running or finished, where it is being placed in memory and what the current +state is. This bookkeeping part is abstracted away in a `Task`. -A `Task` is similar to a `Thread`, with some minor differences: it will be scheduled by the program instead of the operating system kernel, and if it encounters a point where it needs to wait, the program itself is responsible for waking it up again. We'll talk a little bit about that later. An `async_std` task can also have a name and an ID, just like a thread. +A `Task` is similar to a `Thread`, with some minor differences: it will be scheduled by the program instead of the +operating system kernel, and if it encounters a point where it needs to wait, the program itself is responsible for +waking it up again. We'll talk a little bit about that later. An `async_std` task can also have a name and an ID, +just like a thread. -For now, it is enough to know that once you have `spawn`ed a task, it will continue running in the background. The `JoinHandle` is itself a future that will finish once the `Task` has run to conclusion. Much like with `threads` and the `join` function, we can now call `block_on` on the handle to *block* the program (or the calling thread, to be specific) and wait for it to finish. +For now, it is enough to know that once you have `spawn`ed a task, it will continue running in the background. +The `JoinHandle` is itself a future that will finish once the `Task` has run to conclusion. Much like with `threads` +and the `join` function, we can now call `block_on` on the handle to *block* the program (or the calling thread, to be +specific) and wait for it to finish. ## Tasks in `async_std` @@ -80,7 +89,11 @@ Tasks in `async_std` are one of the core abstractions. Much like Rust's `thread` ## Blocking -`Task`s are assumed to run _concurrently_, potentially by sharing a thread of execution. This means that operations blocking an _operating system thread_, such as `std::thread::sleep` or io function from Rust's `std` library will _stop execution of all tasks sharing this thread_. Other libraries (such as database drivers) have similar behaviour. Note that _blocking the current thread_ is not in and of itself bad behaviour, just something that does not mix well with the concurrent execution model of `async-std`. Essentially, never do this: +`Task`s are assumed to run _concurrently_, potentially by sharing a thread of execution. This means that operations +blocking an _operating system thread_, such as `std::thread::sleep` or io function from Rust's `std` library will +_stop execution of all tasks sharing this thread_. Other libraries (such as database drivers) have similar behaviour. +Note that _blocking the current thread_ is not in and of itself bad behaviour, just something that does not mix well +with the concurrent execution model of `async-std`. Essentially, never do this: ```rust,edition2018 # extern crate async_std; diff --git a/docs/src/overview/async-std.md b/docs/src/overview/async-std.md index 0086599f1..e1117d8fa 100644 --- a/docs/src/overview/async-std.md +++ b/docs/src/overview/async-std.md @@ -1,7 +1,13 @@ # Welcome to `async-std` -`async-std`, along with its [supporting libraries][organization], is a library making your life in async programming easier. 
It provides fundamental implementations for downstream libraries and applications alike. The name reflects the approach of this library: it is as closely modeled to the Rust main standard library as possible, replacing all components by async counterparts. +`async-std`, along with its [supporting libraries][organization], is a library making your life in async programming +easier. It provides fundamental implementations for downstream libraries and applications alike. The name reflects the +approach of this library: it is as closely modeled to the Rust main standard library as possible, replacing all +components by async counterparts. -`async-std` provides an interface to all important primitives: filesystem operations, network operations and concurrency basics like timers. It also exposes a `task` in a model similar to the `thread` module found in the Rust standard lib. But it does not only include I/O primitives, but also `async/await` compatible versions of primitives like `Mutex`. +`async-std` provides an interface to all important primitives: filesystem operations, network operations and +concurrency basics like timers. It also exposes a `task` in a model similar to the `thread` module found in the Rust +standard lib. But it does not only include I/O primitives, but also `async/await` compatible versions of primitives +like `Mutex`. [organization]: https://github.com/async-rs diff --git a/docs/src/overview/stability-guarantees.md b/docs/src/overview/stability-guarantees.md index 8c14e20fd..e230c510b 100644 --- a/docs/src/overview/stability-guarantees.md +++ b/docs/src/overview/stability-guarantees.md @@ -25,7 +25,9 @@ Please note that our SemVer guarantees don't extend to usage of those interfaces ## Minimum version policy -The current tentative policy is that the minimum Rust version required to use this crate can be increased in minor version updates. For example, if `async-std` 1.0 requires Rust 1.37.0, then `async-std` 1.0.z for all values of z will also require Rust 1.37.0 or newer. However, `async-std` 1.y for y > 0 may require a newer minimum version of Rust. +The current tentative policy is that the minimum Rust version required to use this crate can be increased in minor +version updates. For example, if `async-std` 1.0 requires Rust 1.37.0, then `async-std` 1.0.z for all values of `z` +will also require Rust 1.37.0 or newer. However, `async-std` 1.y for y > 0 may require a newer minimum version of Rust. In general, this crate will be conservative with respect to the minimum supported version of Rust. With `async/await` being a new feature though, we will track changes in a measured pace initially. diff --git a/docs/src/patterns.md b/docs/src/patterns.md index a19b81b41..c9e3229f0 100644 --- a/docs/src/patterns.md +++ b/docs/src/patterns.md @@ -2,4 +2,4 @@ This section documents small, useful patterns. -It is intended to be read at a glance, allowing you to get back when you have a problem. \ No newline at end of file +It is intended to be read at a glance, allowing you to get back when you have a problem. diff --git a/docs/src/security/policy.md b/docs/src/security/policy.md index 06a08b484..72d1308a7 100644 --- a/docs/src/security/policy.md +++ b/docs/src/security/policy.md @@ -2,9 +2,15 @@ Safety is one of the core principles of what we do, and to that end, we would like to ensure that async-std has a secure implementation. Thank you for taking the time to responsibly disclose any issues you find. 
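
The `async-std` overview above mentions `async/await` compatible versions of primitives like `Mutex`; a minimal sketch of what that looks like in practice (the shared counter and the task count are invented for illustration):

```rust
use std::sync::Arc;

use async_std::sync::Mutex;
use async_std::task;

fn main() {
    task::block_on(async {
        // An async-aware mutex: `lock()` is awaited instead of blocking the thread.
        let counter = Arc::new(Mutex::new(0u32));

        let mut handles = Vec::new();
        for _ in 0..10 {
            let counter = Arc::clone(&counter);
            handles.push(task::spawn(async move {
                let mut value = counter.lock().await;
                *value += 1;
            }));
        }

        for handle in handles {
            handle.await;
        }

        assert_eq!(*counter.lock().await, 10);
    });
}
```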
-All security bugs in async-std distribution should be reported by email to florian.gilcher@ferrous-systems.com. This list is delivered to a small security team. Your email will be acknowledged within 24 hours, and you’ll receive a more detailed response to your email within 48 hours indicating the next steps in handling your report. If you would like, you can encrypt your report using our public key. This key is also On MIT’s keyserver and reproduced below. +All security bugs in async-std distribution should be reported by email to florian.gilcher@ferrous-systems.com. +This list is delivered to a small security team. Your email will be acknowledged within 24 hours, and you’ll receive +a more detailed response to your email within 48 hours indicating the next steps in handling your report. If you would +like, you can encrypt your report using our public key. This key is also On MIT’s keyserver and reproduced below. -Be sure to use a descriptive subject line to avoid having your report be missed. After the initial reply to your report, the security team will endeavor to keep you informed of the progress being made towards a fix and full announcement. As recommended by [RFPolicy][rf-policy], these updates will be sent at least every five days. In reality, this is more likely to be every 24-48 hours. +Be sure to use a descriptive subject line to avoid having your report be missed. After the initial reply to your +report, the security team will endeavor to keep you informed of the progress being made towards a fix and full +announcement. As recommended by [RFPolicy][rf-policy], these updates will be sent at least every five days. In reality, +this is more likely to be every 24-48 hours. If you have not received a reply to your email within 48 hours, or have not heard from the security team for the past five days, there are a few steps you can take (in order): @@ -24,7 +30,9 @@ The async-std project has a 5 step disclosure process. * Fixes are prepared for all releases which are still under maintenance. These fixes are not committed to the public repository but rather held locally pending the announcement. * On the embargo date, the changes are pushed to the public repository and new builds are deployed to crates.io. Within 6 hours, a copy of the advisory will be published on the the async.rs blog. -This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the release process above to ensure that the disclosure is handled in a consistent manner. +This process can take some time, especially when coordination is required with maintainers of other projects. +Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow +the release process above to ensure that the disclosure is handled in a consistent manner. ## Credits diff --git a/src/os/windows/io.rs b/src/os/windows/io.rs index caffc6fc6..927e51ab1 100644 --- a/src/os/windows/io.rs +++ b/src/os/windows/io.rs @@ -58,11 +58,12 @@ cfg_docs! { pub trait FromRawSocket { /// Creates a new I/O object from the given raw socket. /// - /// This function will consume ownership of the socket provided and it will be closed when the returned object goes out of scope. + /// This function will consume ownership of the socket provided and it will be closed when the returned object + /// goes out of scope. 
     ///
-    /// This function is also unsafe as the primitives currently returned have the contract that they are the sole owner of the
-    /// file descriptor they are wrapping. Usage of this function could accidentally allow violating this contract which can cause
-    /// memory unsafety in code that relies on it being true.
+    /// This function is also unsafe as the primitives currently returned have the contract that they are the sole
+    /// owner of the file descriptor they are wrapping. Usage of this function could accidentally allow violating
+    /// this contract which can cause memory unsafety in code that relies on it being true.
     unsafe fn from_raw_socket(sock: RawSocket) -> Self;
 }
 
diff --git a/src/rt/mod.rs b/src/rt/mod.rs
index 80f1c4e6b..a08ca154b 100644
--- a/src/rt/mod.rs
+++ b/src/rt/mod.rs
@@ -12,7 +12,13 @@ pub static RUNTIME: Lazy<Runtime> = Lazy::new(|| {
     // Create an executor thread pool.
     let thread_name =
         env::var("ASYNC_STD_THREAD_NAME").unwrap_or_else(|_| "async-std/runtime".to_string());
-    async_global_executor::init_with_config(async_global_executor::GlobalExecutorConfig::default().with_env_var("ASYNC_STD_THREAD_COUNT").with_thread_name_fn(move || thread_name.clone()));
+    async_global_executor::init_with_config(
+        async_global_executor::GlobalExecutorConfig::default()
+            .with_env_var("ASYNC_STD_THREAD_COUNT")
+            .with_thread_name_fn(
+                move || thread_name.clone()
+            )
+    );
 
     Runtime {}
 });
diff --git a/src/stream/stream/mod.rs b/src/stream/stream/mod.rs
index 144194d24..e09d63be0 100644
--- a/src/stream/stream/mod.rs
+++ b/src/stream/stream/mod.rs
@@ -1714,7 +1714,8 @@ pub trait StreamExt: Stream {
     #[doc = r#"
         Converts an stream of pairs into a pair of containers.
 
-        `unzip()` consumes an entire stream of pairs, producing two collections: one from the left elements of the pairs, and one from the right elements.
+        `unzip()` consumes an entire stream of pairs, producing two collections: one from the left elements of the
+        pairs, and one from the right elements.
 
         This function is, in some sense, the opposite of [`zip`].
 
diff --git a/src/task/current.rs b/src/task/current.rs
index ad354d629..c3a0da312 100644
--- a/src/task/current.rs
+++ b/src/task/current.rs
@@ -45,4 +45,4 @@ pub fn current() -> Task {
 /// ```
 pub fn try_current() -> Option<Task> {
     TaskLocalsWrapper::get_current(|t| t.task().clone())
-}
\ No newline at end of file
+}
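
The `unzip` documentation touched in the last hunk is easiest to see with a usage sketch (the input pairs are invented; `stream::from_iter` and `StreamExt::unzip` are assumed to be available in the async-std version and feature set in use):

```rust
use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        // A stream of pairs...
        let s = stream::from_iter(vec![(1, 2), (3, 4)]);

        // ...consumed into two collections: the left elements and the right elements.
        let (left, right): (Vec<i32>, Vec<i32>) = s.unzip().await;

        assert_eq!(left, vec![1, 3]);
        assert_eq!(right, vec![2, 4]);
    });
}
```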