Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(ext/node): handle 'upgrade' responses #19412

Merged
merged 20 commits into from
Jun 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 50 additions & 0 deletions cli/tests/unit_node/http_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -649,3 +649,53 @@ Deno.test("[node/http] HTTPS server", async () => {
await Promise.all([promise, promise2]);
client.close();
});

// Verifies that the Node "http" client surfaces protocol-upgrade responses
// (e.g. a WebSocket handshake) through the request's "upgrade" event.
Deno.test(
  "[node/http] client upgrade",
  { permissions: { net: true } },
  async () => {
    const done = deferred();

    // Handler for ordinary (non-upgrade) requests; not expected to run here.
    const server = http.createServer((_req, res) => {
      res.writeHead(200, { "Content-Type": "text/plain" });
      res.end("okay");
    });

    // @ts-ignore it's a socket for real
    let upgradedServerSocket;
    server.on("upgrade", (_req, socket, _head) => {
      // Hand-rolled 101 response completing the upgrade handshake.
      socket.write(
        "HTTP/1.1 101 Web Socket Protocol Handshake\r\n" +
          "Upgrade: WebSocket\r\n" +
          "Connection: Upgrade\r\n" +
          "\r\n",
      );
      upgradedServerSocket = socket;
    });

    // Once the server is listening, issue a request asking it to switch
    // protocols.
    server.listen(1337, "127.0.0.1", () => {
      const req = http.request({
        port: 1337,
        host: "127.0.0.1",
        headers: {
          "Connection": "Upgrade",
          "Upgrade": "websocket",
        },
      });
      req.end();

      req.on("upgrade", (_res, socket, _upgradeHead) => {
        // The client observed the upgrade; tear everything down.
        socket.end();
        // @ts-ignore it's a socket for real
        upgradedServerSocket!.end();
        server.close(() => {
          done.resolve();
        });
      });
    });

    await done;
  },
);
2 changes: 1 addition & 1 deletion ext/fetch/26_fetch.js
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ function opFetch(method, url, headers, clientRid, hasBody, bodyLength, body) {
* @returns {Promise<{ status: number, statusText: string, headers: [string, string][], url: string, responseRid: number }>}
*/
function opFetchSend(rid) {
return core.opAsync("op_fetch_send", rid);
return core.opAsync("op_fetch_send", rid, true);
}

/**
Expand Down
191 changes: 178 additions & 13 deletions ext/fetch/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ use deno_core::op;
use deno_core::BufView;
use deno_core::WriteOutcome;

use deno_core::task::spawn;
use deno_core::url::Url;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
Expand Down Expand Up @@ -58,6 +59,8 @@ use reqwest::RequestBuilder;
use reqwest::Response;
use serde::Deserialize;
use serde::Serialize;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc;

// Re-export reqwest and data_url
Expand Down Expand Up @@ -109,6 +112,8 @@ deno_core::extension!(deno_fetch,
ops = [
op_fetch<FP>,
op_fetch_send,
op_fetch_response_into_byte_stream,
op_fetch_response_upgrade,
op_fetch_custom_client<FP>,
],
esm = [
Expand Down Expand Up @@ -414,12 +419,15 @@ pub struct FetchResponse {
pub url: String,
pub response_rid: ResourceId,
pub content_length: Option<u64>,
pub remote_addr_ip: Option<String>,
pub remote_addr_port: Option<u16>,
}

#[op]
pub async fn op_fetch_send(
state: Rc<RefCell<OpState>>,
rid: ResourceId,
into_byte_stream: bool,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not a big fan of having both this option and the separate op.
Is there a considerable perf difference between passing this option and simply calling the two ops one after the other for fetch?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably not - let's use this option for now - we will be rewriting all of that code to drop reqwest and we can clean it all up then.

) -> Result<FetchResponse, AnyError> {
let request = state
.borrow_mut()
Expand All @@ -436,7 +444,6 @@ pub async fn op_fetch_send(
Err(_) => return Err(type_error("request was cancelled")),
};

//debug!("Fetch response {}", url);
let status = res.status();
let url = res.url().to_string();
let mut res_headers = Vec::new();
Expand All @@ -445,29 +452,175 @@ pub async fn op_fetch_send(
}

let content_length = res.content_length();
let remote_addr = res.remote_addr();
let (remote_addr_ip, remote_addr_port) = if let Some(addr) = remote_addr {
(Some(addr.ip().to_string()), Some(addr.port()))
} else {
(None, None)
};

let stream: BytesStream = Box::pin(res.bytes_stream().map(|r| {
r.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))
}));
let rid = state
.borrow_mut()
.resource_table
.add(FetchResponseBodyResource {
reader: AsyncRefCell::new(stream.peekable()),
cancel: CancelHandle::default(),
size: content_length,
});
let response_rid = if !into_byte_stream {
state
.borrow_mut()
.resource_table
.add(FetchResponseResource {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we can clean this up down the line so that we always create a FetchResponseResource that can internally either be the full response, or deconstructed into a stream (an inner enum). And FetchResponseBodyResource goes away. The first read would deconstruct automatically. This is what we used to do for HTTP too - not sure if we still do that.

That way the caller would not have to choose whether they want to deconstruction or not. Depending on which ops you use later, it either happens or not.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

response: res,
size: content_length,
})
} else {
let stream: BytesStream = Box::pin(res.bytes_stream().map(|r| {
r.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))
}));
state
.borrow_mut()
.resource_table
.add(FetchResponseBodyResource {
reader: AsyncRefCell::new(stream.peekable()),
cancel: CancelHandle::default(),
size: content_length,
})
};

Ok(FetchResponse {
status: status.as_u16(),
status_text: status.canonical_reason().unwrap_or("").to_string(),
headers: res_headers,
url,
response_rid: rid,
response_rid,
content_length,
remote_addr_ip,
remote_addr_port,
})
}

#[op]
pub fn op_fetch_response_into_byte_stream(
state: &mut OpState,
rid: ResourceId,
) -> Result<ResourceId, AnyError> {
let raw_response = state.resource_table.take::<FetchResponseResource>(rid)?;
let raw_response = Rc::try_unwrap(raw_response)
.expect("Someone is holding onto FetchResponseResource");
let stream: BytesStream =
Box::pin(raw_response.response.bytes_stream().map(|r| {
r.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))
}));

let rid = state.resource_table.add(FetchResponseBodyResource {
reader: AsyncRefCell::new(stream.peekable()),
cancel: CancelHandle::default(),
size: raw_response.size,
});

Ok(rid)
}

/// Takes over a fetched response whose status requested a protocol upgrade
/// (e.g. 101 Switching Protocols) and exposes the underlying connection to
/// JS as a readable/writable `UpgradeStream` resource.
#[op]
pub async fn op_fetch_response_upgrade(
  state: Rc<RefCell<OpState>>,
  rid: ResourceId,
) -> Result<ResourceId, AnyError> {
  let raw_response = state
    .borrow_mut()
    .resource_table
    .take::<FetchResponseResource>(rid)?;
  // The resource was just removed from the table, so this should be the
  // only remaining strong reference.
  let raw_response = Rc::try_unwrap(raw_response)
    .expect("Someone is holding onto FetchResponseResource");

  // In-memory duplex pipe: one end is handed to JS (wrapped in
  // `UpgradeStream` below), the other end is pumped to/from the upgraded
  // network connection by the two spawned tasks.
  let (read, write) = tokio::io::duplex(1024);
  let (read_rx, write_tx) = tokio::io::split(read);
  let (mut write_rx, mut read_tx) = tokio::io::split(write);
  let upgraded = raw_response.response.upgrade().await?;
  {
    // Stage 3: Pump the data
    let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded);

    // Network -> duplex pipe: copy until the peer signals EOF (read == 0).
    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = upgraded_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        read_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, AnyError>(())
    });
    // Duplex pipe -> network: mirror image of the task above.
    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = write_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        upgraded_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, AnyError>(())
    });
  }

  // NOTE(review): errors from the detached pump tasks are silently dropped;
  // JS only observes them as the stream ending — confirm that is acceptable.
  Ok(
    state
      .borrow_mut()
      .resource_table
      .add(UpgradeStream::new(read_rx, write_tx)),
  )
}

/// Resource wrapping the JS-facing end of the upgrade duplex pipe created in
/// `op_fetch_response_upgrade`.
struct UpgradeStream {
  // Half serving reads issued from JS.
  read: AsyncRefCell<tokio::io::ReadHalf<tokio::io::DuplexStream>>,
  // Half accepting writes issued from JS.
  write: AsyncRefCell<tokio::io::WriteHalf<tokio::io::DuplexStream>>,
  // Cancels any in-flight read/write when the resource is closed.
  cancel_handle: CancelHandle,
}

impl UpgradeStream {
  pub fn new(
    read: tokio::io::ReadHalf<tokio::io::DuplexStream>,
    write: tokio::io::WriteHalf<tokio::io::DuplexStream>,
  ) -> Self {
    Self {
      read: AsyncRefCell::new(read),
      write: AsyncRefCell::new(write),
      cancel_handle: CancelHandle::new(),
    }
  }

  /// Reads into `buf`; returns early with a cancellation error if the
  /// resource is closed while the read is pending.
  async fn read(self: Rc<Self>, buf: &mut [u8]) -> Result<usize, AnyError> {
    let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
    async {
      // Exclusive async borrow of the read half for this read's duration.
      let read = RcRef::map(self, |this| &this.read);
      let mut read = read.borrow_mut().await;
      Ok(Pin::new(&mut *read).read(buf).await?)
    }
    .try_or_cancel(cancel_handle)
    .await
  }

  /// Writes `buf`; returns early with a cancellation error if the resource
  /// is closed while the write is pending.
  async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, AnyError> {
    let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
    async {
      // Exclusive async borrow of the write half for this write's duration.
      let write = RcRef::map(self, |this| &this.write);
      let mut write = write.borrow_mut().await;
      Ok(Pin::new(&mut *write).write(buf).await?)
    }
    .try_or_cancel(cancel_handle)
    .await
  }
}

impl Resource for UpgradeStream {
  fn name(&self) -> Cow<str> {
    "fetchUpgradedStream".into()
  }

  // Route the resource's read/write entry points to the inherent
  // `read`/`write` methods defined above.
  deno_core::impl_readable_byob!();
  deno_core::impl_writable!();

  fn close(self: Rc<Self>) {
    // Abort any pending read/write instead of waiting for it to complete.
    self.cancel_handle.cancel();
  }
}

type CancelableResponseResult = Result<Result<Response, AnyError>, Canceled>;

pub struct FetchRequestResource(
Expand Down Expand Up @@ -545,6 +698,18 @@ impl Resource for FetchRequestBodyResource {
/// Pinned, boxed stream of response-body chunks (or the I/O error that
/// terminated the body).
type BytesStream =
  Pin<Box<dyn Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin>>;

/// Holds a complete, not-yet-consumed response so it can later either be
/// deconstructed into a byte stream (`op_fetch_response_into_byte_stream`)
/// or upgraded to a raw connection (`op_fetch_response_upgrade`).
#[derive(Debug)]
pub struct FetchResponseResource {
  pub response: Response,
  // Content length reported for the response, if known.
  pub size: Option<u64>,
}

impl Resource for FetchResponseResource {
  // Name shown in resource listings (e.g. `Deno.resources()`-style output).
  fn name(&self) -> Cow<str> {
    "fetchResponse".into()
  }
}

pub struct FetchResponseBodyResource {
pub reader: AsyncRefCell<Peekable<BytesStream>>,
pub cancel: CancelHandle,
Expand Down
Loading