diff --git a/rpc/README.md b/rpc/README.md
index 016b32b8b8..ade3cd07d0 100644
--- a/rpc/README.md
+++ b/rpc/README.md
@@ -1070,9 +1070,10 @@ The response looks like below when `verbosity` is 0.
#### Method `get_live_cell`
-* `get_live_cell(out_point, with_data)`
+* `get_live_cell(out_point, with_data, include_tx_pool)`
* `out_point`: [`OutPoint`](#type-outpoint)
* `with_data`: `boolean`
+ * `include_tx_pool`: `boolean` `|` `null`
* result: [`CellWithStatus`](#type-cellwithstatus)
Returns the status of a cell. The RPC returns extra information if it is a [live cell](#live-cell).
@@ -1092,6 +1093,7 @@ result.
* `out_point` - Reference to the cell by transaction hash and output index.
* `with_data` - Whether the RPC should return cell data. Cell data can be huge, if the client
does not need the data, it should set this to `false` to save bandwidth.
+* `include_tx_pool` - Whether the RPC should also check the cell's status in the transaction pool; default is `false`.
###### Examples
diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs
index 462dd8ea94..a7d2c861fb 100644
--- a/rpc/src/module/chain.rs
+++ b/rpc/src/module/chain.rs
@@ -785,6 +785,7 @@ pub trait ChainRpc {
/// * `out_point` - Reference to the cell by transaction hash and output index.
/// * `with_data` - Whether the RPC should return cell data. Cell data can be huge, if the client
/// does not need the data, it should set this to `false` to save bandwidth.
+    /// * `include_tx_pool` - Whether the RPC should also check the cell's status in the transaction pool; default is `false`.
///
/// ## Examples
///
@@ -832,7 +833,12 @@ pub trait ChainRpc {
/// }
/// ```
#[rpc(name = "get_live_cell")]
-    fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> Result<CellWithStatus>;
+    fn get_live_cell(
+        &self,
+        out_point: OutPoint,
+        with_data: bool,
+        include_tx_pool: Option<bool>,
+    ) -> Result<CellWithStatus>;
/// Returns the highest block number in the [canonical chain](#canonical-chain).
///
@@ -1813,12 +1819,23 @@ impl ChainRpc for ChainRpcImpl {
}))
}
-    fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> Result<CellWithStatus> {
-        let cell_status = self
-            .shared
-            .snapshot()
-            .as_ref()
-            .cell(&out_point.into(), with_data);
+    fn get_live_cell(
+        &self,
+        out_point: OutPoint,
+        with_data: bool,
+        include_tx_pool: Option<bool>,
+    ) -> Result<CellWithStatus> {
+        let cell_status: CellStatus = if include_tx_pool.unwrap_or_default() {
+            self.shared
+                .tx_pool_controller()
+                .get_live_cell(out_point.into(), with_data)
+                .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?
+        } else {
+            self.shared
+                .snapshot()
+                .as_ref()
+                .cell(&out_point.into(), with_data)
+        };
Ok(cell_status.into())
}
diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs
index 64f17f4aa9..e684508127 100644
--- a/tx-pool/src/service.rs
+++ b/tx-pool/src/service.rs
@@ -18,7 +18,10 @@ use ckb_logger::{error, info};
use ckb_network::{NetworkController, PeerIndex};
use ckb_snapshot::Snapshot;
use ckb_stop_handler::new_tokio_exit_rx;
+use ckb_store::ChainStore;
+use ckb_types::core::cell::{CellProvider, CellStatus, OverlayCellProvider};
use ckb_types::core::tx_pool::{EntryCompleted, PoolTxDetailInfo, TransactionWithStatus, TxStatus};
+use ckb_types::packed::OutPoint;
use ckb_types::{
core::{
tx_pool::{Reject, TxPoolEntryInfo, TxPoolIds, TxPoolInfo, TRANSACTION_SIZE_LIMIT},
@@ -39,6 +42,7 @@ use tokio::sync::{mpsc, RwLock};
use tokio::task::block_in_place;
use tokio_util::sync::CancellationToken;
+use crate::pool_cell::PoolCell;
#[cfg(feature = "internal")]
use crate::{component::entry::TxEntry, process::PlugTarget};
@@ -98,6 +102,7 @@ pub(crate) enum Message {
FetchTxs(Request, HashMap>),
FetchTxsWithCycles(Request, FetchTxsWithCyclesResult>),
GetTxPoolInfo(Request<(), TxPoolInfo>),
+ GetLiveCell(Request<(OutPoint, bool), CellStatus>),
GetTxStatus(Request),
GetTransactionWithStatus(Request),
NewUncle(Notify),
@@ -260,6 +265,15 @@ impl TxPoolController {
send_message!(self, GetTxPoolInfo, ())
}
+    /// Return the live-cell status of `out_point`, consulting the tx-pool state
+    pub fn get_live_cell(
+        &self,
+        out_point: OutPoint,
+        with_data: bool,
+    ) -> Result<CellStatus, AnyError> {
+        send_message!(self, GetLiveCell, (out_point, with_data))
+    }
+
/// Return fresh proposals
pub fn fresh_proposals_filter(
&self,
@@ -679,6 +693,15 @@ async fn process(mut service: TxPoolService, message: Message) {
error!("Responder sending get_tx_pool_info failed {:?}", e);
};
}
+ Message::GetLiveCell(Request {
+ responder,
+ arguments: (out_point, with_data),
+ }) => {
+ let live_cell_status = service.get_live_cell(out_point, with_data).await;
+ if let Err(e) = responder.send(live_cell_status) {
+ error!("Responder sending get_live_cell failed {:?}", e);
+ };
+ }
Message::BlockTemplate(Request {
responder,
arguments: (_bytes_limit, _proposals_limit, _max_version),
@@ -955,6 +978,27 @@ impl TxPoolService {
}
}
+ /// Get Live Cell Status
+ async fn get_live_cell(&self, out_point: OutPoint, eager_load: bool) -> CellStatus {
+ let tx_pool = self.tx_pool.read().await;
+ let snapshot = tx_pool.snapshot();
+ let pool_cell = PoolCell::new(&tx_pool.pool_map, false);
+ let provider = OverlayCellProvider::new(&pool_cell, snapshot);
+
+ match provider.cell(&out_point, false) {
+ CellStatus::Live(mut cell_meta) => {
+ if eager_load {
+ if let Some((data, data_hash)) = snapshot.get_cell_data(&out_point) {
+ cell_meta.mem_cell_data = Some(data);
+ cell_meta.mem_cell_data_hash = Some(data_hash);
+ }
+ }
+ CellStatus::live_cell(cell_meta)
+ }
+ _ => CellStatus::Unknown,
+ }
+ }
+
pub fn should_notify_block_assembler(&self) -> bool {
self.block_assembler.is_some()
}