diff --git a/go.mod b/go.mod index 7e12cef..95f17e6 100644 --- a/go.mod +++ b/go.mod @@ -2,4 +2,9 @@ module github.com/goplus/llgoexamples go 1.20 -require github.com/goplus/llgo v0.9.3-0.20240726020431-98d075728f2b +require ( + github.com/goplus/llgo v0.9.8-0.20240919105235-c6436ea6d196 + golang.org/x/net v0.28.0 +) + +require golang.org/x/text v0.17.0 // indirect diff --git a/go.sum b/go.sum index fdc017f..08150d6 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,6 @@ -github.com/goplus/llgo v0.9.0 h1:yaJzQperGUafEaHc9VlVQVskIngacoTNweEXY0GRi0Q= -github.com/goplus/llgo v0.9.0/go.mod h1:M3UwiYdPZFyx7m2J0+6Ti1dYVA3uOO1WvSBocuE8N7M= -github.com/goplus/llgo v0.9.1-0.20240709104849-d6a38a567fda h1:UIPwlgzCb8dV/7WFMyprhZuq8CSLAQIqwFpH5AhrNOM= -github.com/goplus/llgo v0.9.1-0.20240709104849-d6a38a567fda/go.mod h1:zsrtWZapL4aklZc99xBSZRynGzLTIT1mLRjP0VSn9iw= -github.com/goplus/llgo v0.9.1-0.20240712060421-858d38d314a3 h1:2fZ2zQ8S58KvOsJTx6s6MHoi6n1K4sqQwIbTauMrgEE= -github.com/goplus/llgo v0.9.1-0.20240712060421-858d38d314a3/go.mod h1:zsrtWZapL4aklZc99xBSZRynGzLTIT1mLRjP0VSn9iw= -github.com/goplus/llgo v0.9.3-0.20240726020431-98d075728f2b h1:z9FUoeAALL5ytBhhGhE1dXm4+L1Q2eMUTcfiqLAZgf8= -github.com/goplus/llgo v0.9.3-0.20240726020431-98d075728f2b/go.mod h1:zsrtWZapL4aklZc99xBSZRynGzLTIT1mLRjP0VSn9iw= +github.com/goplus/llgo v0.9.8-0.20240919105235-c6436ea6d196 h1:LckJktvgChf3x0eex+GT//JkYRj1uiT4uMLzyrg3ChU= +github.com/goplus/llgo v0.9.8-0.20240919105235-c6436ea6d196/go.mod h1:5Fs+08NslqofJ7xtOiIXugkurYOoQvY02ZkFNWA1uEI= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= diff --git a/x/net/http/_demo/chunked/chunked.go b/x/net/http/_demo/chunked/chunked.go new file mode 100644 index 0000000..7b33c0c --- /dev/null +++ 
b/x/net/http/_demo/chunked/chunked.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + resp, err := http.Get("http://localhost:8080/chunked") + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/get/get.go b/x/net/http/_demo/get/get.go new file mode 100644 index 0000000..392cc72 --- /dev/null +++ b/x/net/http/_demo/get/get.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + resp, err := http.Get("https://www.baidu.com") + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/headers/headers.go b/x/net/http/_demo/headers/headers.go new file mode 100644 index 0000000..41cc15f --- /dev/null +++ b/x/net/http/_demo/headers/headers.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + client := &http.Client{} + req, err := http.NewRequest("GET", "https://www.baidu.com", nil) + if err != nil { + println(err.Error()) + return + } + + //req.Header.Set("accept", "*/*") + req.Header.Set("accept-encoding", "gzip") + //req.Header.Set("cache-control", "no-cache") + //req.Header.Set("pragma", "no-cache") + //req.Header.Set("priority", "u=0, i") + 
//req.Header.Set("referer", "https://jsonplaceholder.typicode.com/") + //req.Header.Set("sec-ch-ua", "\"Not)A;Brand\";v=\"99\", \"Google Chrome\";v=\"127\", \"Chromium\";v=\"127\"") + //req.Header.Set("sec-ch-ua-mobile", "?0") + //req.Header.Set("sec-ch-ua-platform", "\"macOS\"") + //req.Header.Set("sec-fetch-dest", "document") + //req.Header.Set("sec-fetch-mode", "navigate") + //req.Header.Set("sec-fetch-site", "same-origin") + //req.Header.Set("sec-fetch-user", "?1") + ////req.Header.Set("upgrade-insecure-requests", "1") + //req.Header.Set("user-agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36") + + resp, err := client.Do(req) + if err != nil { + println(err.Error()) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + println(err.Error()) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/maxConnsPerHost/maxConnsPerHost.go b/x/net/http/_demo/maxConnsPerHost/maxConnsPerHost.go new file mode 100644 index 0000000..eff95fc --- /dev/null +++ b/x/net/http/_demo/maxConnsPerHost/maxConnsPerHost.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + client := &http.Client{ + Transport: &http.Transport{ + MaxConnsPerHost: 2, + }, + } + req, err := http.NewRequest("GET", "https://www.baidu.com", nil) + resp, err := client.Do(req) + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + fmt.Println(resp.Proto) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + 
fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/parallelRequest/parallelRequest.go b/x/net/http/_demo/parallelRequest/parallelRequest.go new file mode 100644 index 0000000..0bcb336 --- /dev/null +++ b/x/net/http/_demo/parallelRequest/parallelRequest.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "sync" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func worker(id int, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := http.Get("http://www.baidu.com") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(id, ":", resp.Status) + //body, err := io.ReadAll(resp.Body) + //if err != nil { + // fmt.Println(err) + // return + //} + //fmt.Println(string(body)) + resp.Body.Close() +} + +func main() { + var wait sync.WaitGroup + for i := 0; i < 500; i++ { + wait.Add(1) + go worker(i, &wait) + } + wait.Wait() + fmt.Println("All done") + + resp, err := http.Get("http://www.baidu.com") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(resp.Status) + resp.Body.Close() +} diff --git a/x/net/http/_demo/post/post.go b/x/net/http/_demo/post/post.go new file mode 100644 index 0000000..b700028 --- /dev/null +++ b/x/net/http/_demo/post/post.go @@ -0,0 +1,31 @@ +package main + +import ( + "bytes" + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + data := []byte(`{"id":1,"title":"foo","body":"bar","userId":"1"}`) + resp, err := http.Post("https://jsonplaceholder.typicode.com/posts", "application/json; charset=UTF-8", bytes.NewBuffer(data)) + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/postform/postform.go b/x/net/http/_demo/postform/postform.go new file mode 100644 index 
0000000..232c15d --- /dev/null +++ b/x/net/http/_demo/postform/postform.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "io" + "net/url" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + formData := url.Values{ + "name": {"John Doe"}, + "email": {"johndoe@example.com"}, + } + + resp, err := http.PostForm("http://httpbin.org/post", formData) + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/redirect/redirect.go b/x/net/http/_demo/redirect/redirect.go new file mode 100644 index 0000000..3d40f3b --- /dev/null +++ b/x/net/http/_demo/redirect/redirect.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + resp, err := http.Get("http://localhost:8080") // Start "../server/redirectServer.go" before running + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + fmt.Println(resp.Proto) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) +} diff --git a/x/net/http/_demo/reuseConn/reuseConn.go b/x/net/http/_demo/reuseConn/reuseConn.go new file mode 100644 index 0000000..bccfe9d --- /dev/null +++ b/x/net/http/_demo/reuseConn/reuseConn.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "io" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + // Send request first time + resp, err := http.Get("https://www.baidu.com") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: 
%s\n", key, value) + } + } + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) + resp.Body.Close() + + // Send request second time + resp, err = http.Get("https://www.baidu.com") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(resp.Status, "read bytes: ", resp.ContentLength) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + body, err = io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(body)) + resp.Body.Close() +} diff --git a/x/net/http/_demo/server/chunkedServer.go b/x/net/http/_demo/server/chunkedServer.go new file mode 100644 index 0000000..b79ad60 --- /dev/null +++ b/x/net/http/_demo/server/chunkedServer.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "net/http" +) + +func chunkedHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", "text/plain") + + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "Streaming unsupported!", http.StatusInternalServerError) + return + } + + sentence := "This is a chunked encoded response. It will be sent in multiple parts. Note the delay between each section." 
+ + words := []string{} + start := 0 + for i, r := range sentence { + if r == '。' || r == ',' || i == len(sentence)-1 { + words = append(words, sentence[start:i+1]) + start = i + 1 + } + } + + for _, word := range words { + fmt.Fprintf(w, "%s", word) + flusher.Flush() + } +} + +func main() { + http.HandleFunc("/chunked", chunkedHandler) + fmt.Println("Starting server on :8080") + err := http.ListenAndServe(":8080", nil) + if err != nil { + fmt.Printf("Error starting server: %s\n", err) + } +} \ No newline at end of file diff --git a/x/net/http/_demo/server/redirectServer.go b/x/net/http/_demo/server/redirectServer.go new file mode 100644 index 0000000..a6830af --- /dev/null +++ b/x/net/http/_demo/server/redirectServer.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + "log" + "net/http" +) + +func main() { + http.HandleFunc("/", handleInitialRequest) + http.HandleFunc("/redirect", handleRedirectRequest) + + fmt.Println("Server is running on http://localhost:8080") + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +func handleInitialRequest(w http.ResponseWriter, r *http.Request) { + log.Println("Received initial request, redirecting...") + http.Redirect(w, r, "/redirect", http.StatusSeeOther) +} + +func handleRedirectRequest(w http.ResponseWriter, r *http.Request) { + log.Println("Received redirect request, sending response...") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "Hello redirect") +} diff --git a/x/net/http/_demo/timeout/timeout.go b/x/net/http/_demo/timeout/timeout.go new file mode 100644 index 0000000..a6930b1 --- /dev/null +++ b/x/net/http/_demo/timeout/timeout.go @@ -0,0 +1,33 @@ +package main + +import ( + "fmt" + "io" + "time" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + client := &http.Client{ + Timeout: time.Millisecond, // Set a small timeout to ensure it will time out + //Timeout: time.Second, + } + req, err := http.NewRequest("GET", "https://www.baidu.com", nil) + if err != nil { + fmt.Println(err) + return + } 
+ resp, err := client.Do(req) + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + println(string(body)) +} diff --git a/x/net/http/_demo/upload/example.txt b/x/net/http/_demo/upload/example.txt new file mode 100755 index 0000000..1253cd4 --- /dev/null +++ b/x/net/http/_demo/upload/example.txt @@ -0,0 +1 @@ +hello upload \ No newline at end of file diff --git a/x/net/http/_demo/upload/upload.go b/x/net/http/_demo/upload/upload.go new file mode 100644 index 0000000..b5baffa --- /dev/null +++ b/x/net/http/_demo/upload/upload.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "io" + "os" + + "github.com/goplus/llgoexamples/x/net/http" +) + +func main() { + url := "http://httpbin.org/post" + //url := "http://localhost:8080" + filePath := "/Users/spongehah/Documents/code/GOPATH/src/llgo/x/net/http/_demo/upload/example.txt" // Replace with your file path + //filePath := "/Users/spongehah/Downloads/xiaoshuo.txt" // Replace with your file path + + file, err := os.Open(filePath) + if err != nil { + fmt.Println("Error opening file:", err) + return + } + defer file.Close() + + client := &http.Client{} + req, err := http.NewRequest("POST", url, file) + if err != nil { + fmt.Println(err) + return + } + req.Header.Set("expect", "100-continue") + resp, err := client.Do(req) + + if err != nil { + fmt.Println(err) + return + } + defer resp.Body.Close() + fmt.Println("Status:", resp.Status) + for key, values := range resp.Header { + for _, value := range values { + fmt.Printf("%s: %s\n", key, value) + } + } + respBody, err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(string(respBody)) +} diff --git a/x/net/http/bodyChunk.go b/x/net/http/bodyChunk.go new file mode 100644 index 0000000..01d9e74 --- /dev/null +++ b/x/net/http/bodyChunk.go @@ -0,0 +1,80 @@ +package http + +import ( + "errors" + + "github.com/goplus/llgo/c/libuv" 
+) + +type bodyChunk struct { + chunk []byte + readCh chan []byte + asyncHandle *libuv.Async + + done chan struct{} + + rerr error +} + +var ( + errClosedBodyChunk = errors.New("bodyChunk: read/write on closed body") +) + +func newBodyChunk(asyncHandle *libuv.Async) *bodyChunk { + return &bodyChunk{ + readCh: make(chan []byte, 1), + done: make(chan struct{}), + asyncHandle: asyncHandle, + } +} + +func (bc *bodyChunk) Read(p []byte) (n int, err error) { + select { + case <-bc.done: + err = bc.readCloseError() + return + default: + } + + for n < len(p) { + if len(bc.chunk) == 0 { + bc.asyncHandle.Send() + select { + case chunk := <-bc.readCh: + bc.chunk = chunk + case <-bc.done: + err = bc.readCloseError() + return + } + } + + copied := copy(p[n:], bc.chunk) + n += copied + bc.chunk = bc.chunk[copied:] + } + + return +} + +func (bc *bodyChunk) Close() error { + return bc.closeWithError(nil) +} + +func (bc *bodyChunk) readCloseError() error { + if rerr := bc.rerr; rerr != nil { + return rerr + } + return errClosedBodyChunk +} + +func (bc *bodyChunk) closeWithError(err error) error { + if bc.rerr != nil { + return nil + } + if err == nil { + err = errClosedBodyChunk + } + bc.rerr = err + close(bc.done) + return nil +} diff --git a/x/net/http/client.go b/x/net/http/client.go new file mode 100644 index 0000000..fa62732 --- /dev/null +++ b/x/net/http/client.go @@ -0,0 +1,742 @@ +package http + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log" + "net/url" + "reflect" + "sort" + "strings" + "time" +) + +type Client struct { + Transport RoundTripper + CheckRedirect func(req *Request, via []*Request) error + Jar CookieJar + Timeout time.Duration +} + +var DefaultClient = &Client{} + +type RoundTripper interface { + RoundTrip(*Request) (*Response, error) +} + +func (c *Client) transport() RoundTripper { + if c.Transport != nil { + return c.Transport + } + return DefaultTransport +} + +func Get(url string) (*Response, error) { + return 
DefaultClient.Get(url) +} + +func (c *Client) Get(url string) (*Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +func alwaysFalse() bool { return false } + +// ErrUseLastResponse can be returned by Client.CheckRedirect hooks to +// control how redirects are processed. If returned, the next request +// is not sent and the most recent response is returned with its body +// unclosed. +var ErrUseLastResponse = errors.New("net/http: use last response") + +func Post(url, contentType string, body io.Reader) (resp *Response, err error) { + return DefaultClient.Post(url, contentType, body) +} + +func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, err error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return c.Do(req) +} + +func PostForm(url string, data url.Values) (resp *Response, err error) { + return DefaultClient.PostForm(url, data) +} + +func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +func (c *Client) Do(req *Request) (*Response, error) { + return c.do(req) +} + +var testHookClientDoResult func(retres *Response, reterr error) + +func (c *Client) do(req *Request) (retres *Response, reterr error) { + if testHookClientDoResult != nil { + defer func() { testHookClientDoResult(retres, reterr) }() + } + + if req.URL == nil { + req.closeBody() + return nil, &url.Error{ + Op: urlErrorOp(req.Method), + Err: errors.New("http: nil Request.URL"), + } + } + var ( + deadline = c.deadline() + reqs []*Request + resp *Response + copyHeaders = c.makeHeadersCopier(req) + reqBodyClosed = false // have we closed the current req.Body? 
+ + // Redirect behavior: + redirectMethod string + includeBody bool + ) + uerr := func(err error) error { + // the body may have been closed already by c.send() + if !reqBodyClosed { + req.closeBody() + } + var urlStr string + if resp != nil && resp.Request != nil { + urlStr = stripPassword(resp.Request.URL) + } else { + urlStr = stripPassword(req.URL) + } + return &url.Error{ + Op: urlErrorOp(reqs[0].Method), + URL: urlStr, + Err: err, + } + } + + for { + // For all but the first request, create the next + // request hop and replace req. + if len(reqs) > 0 { + loc := resp.Header.Get("Location") + if loc == "" { + // While most 3xx responses include a Location, it is not + // required and 3xx responses without a Location have been + // observed in the wild. See issues #17773 and #49281. + return resp, nil + } + u, err := req.URL.Parse(loc) + if err != nil { + resp.closeBody() + return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err)) + } + host := "" + + if req.Host != "" && req.Host != req.URL.Host { + // If the caller specified a custom Host header and the + // redirect location is relative, preserve the Host header + // through the redirect. See issue #22233. + if u, _ := url.Parse(loc); u != nil && !u.IsAbs() { + host = req.Host + } + } + ireq := reqs[0] + req = &Request{ + Method: redirectMethod, + Response: resp, + URL: u, + Header: make(Header), + Host: host, + //Cancel: ireq.Cancel, + + timer: ireq.timer, + timeoutch: ireq.timeoutch, + } + if includeBody && ireq.GetBody != nil { + req.Body, err = ireq.GetBody() + if err != nil { + resp.closeBody() + return nil, uerr(err) + } + req.ContentLength = ireq.ContentLength + } + + // Copy original headers before setting the Referer, + // in case the user set Referer on their first request. + // If they really want to override, they can do it in + // their CheckRedirect func. 
+ copyHeaders(req) + + // Add the Referer header from the most recent + // request URL to the new one, if it's not https->http: + if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL, req.Header.Get("Referer")); ref != "" { + req.Header.Set("Referer", ref) + } + err = c.checkRedirect(req, reqs) + + // Sentinel error to let users select the + // previous response, without closing its + // body. See Issue 10069. + if err == ErrUseLastResponse { + return resp, nil + } + + // Close the previous response's body. But + // read at least some of the body so if it's + // small the underlying TCP connection will be + // re-used. No need to check for errors: if it + // fails, the Transport won't reuse it anyway. + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { + io.CopyN(io.Discard, resp.Body, maxBodySlurpSize) + } + resp.Body.Close() + + if err != nil { + // Special case for Go 1 compatibility: return both the response + // and an error if the CheckRedirect function failed. + // See https://golang.org/issue/3795 + // The resp.Body has already been closed. + ue := uerr(err) + ue.(*url.Error).URL = loc + return resp, ue + } + } + + reqs = append(reqs, req) + var err error + var didTimeout func() bool + if resp, didTimeout, err = c.send(req, deadline); err != nil { + // c.send() always closes req.Body + reqBodyClosed = true + if !deadline.IsZero() && didTimeout() { + err = &httpError{ + err: err.Error() + " (Client.Timeout exceeded while awaiting headers)", + timeout: true, + } + } + return nil, uerr(err) + } + + var shouldRedirect bool + redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0]) + if !shouldRedirect { + return resp, nil + } + + req.closeBody() + } +} + +// didTimeout is non-nil only if err != nil. 
+func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { + if c.Jar != nil { + for _, cookie := range c.Jar.Cookies(req.URL) { + req.AddCookie(cookie) + } + } + resp, didTimeout, err = send(req, c.transport(), deadline) + if err != nil { + return nil, didTimeout, err + } + if c.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + c.Jar.SetCookies(req.URL, rc) + } + } + return resp, nil, nil +} + +// send issues an HTTP request. +// Caller should close resp.Body when done reading from it. +func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { + req := ireq // req is either the original request, or a modified fork + + if rt == nil { + req.closeBody() + return nil, alwaysFalse, errors.New("http: no Client.Transport or DefaultTransport") + } + + if req.URL == nil { + req.closeBody() + return nil, alwaysFalse, errors.New("http: nil Request.URL") + } + + if req.RequestURI != "" { + req.closeBody() + return nil, alwaysFalse, errors.New("http: Request.RequestURI can't be set in client requests") + } + + // forkReq forks req into a shallow clone of ireq the first + // time it's called. + forkReq := func() { + if ireq == req { + req = new(Request) + *req = *ireq // shallow clone + } + } + + // Most the callers of send (Get, Post, et al) don't need + // Headers, leaving it uninitialized. We guarantee to the + // Transport that this has been initialized, though. + if req.Header == nil { + forkReq() + req.Header = make(Header) + } + + if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" { + username := u.Username() + password, _ := u.Password() + forkReq() + req.Header = cloneOrMakeHeader(ireq.Header) + req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) + } + + if !deadline.IsZero() { + forkReq() + } + + // TODO(hah) tmp timeout(send): LLGo has not yet implemented startTimer. 
+ //stopTimer, didTimeout := setRequestCancel(req, rt, deadline) + req.timeoutch = make(chan struct{}, 1) + req.deadline = deadline + if deadline.IsZero() { + didTimeout = alwaysFalse + defer close(req.timeoutch) + } else { + didTimeout = func() bool { return time.Now().After(deadline) } + } + + resp, err = rt.RoundTrip(req) + if err != nil { + //stopTimer() + if resp != nil { + log.Printf("RoundTripper returned a response & error; ignoring response") + } + //if tlsErr, ok := err.(tls.RecordHeaderError); ok { + // // If we get a bad TLS record header, check to see if the + // // response looks like HTTP and give a more helpful error. + // // See golang.org/issue/11111. + // if string(tlsErr.RecordHeader[:]) == "HTTP/" { + // err = ErrSchemeMismatch + // } + //} + return nil, didTimeout, err + } + if resp == nil { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt) + } + if resp.Body == nil { + // The documentation on the Body field says “The http Client and Transport + // guarantee that Body is always non-nil, even on responses without a body + // or responses with a zero-length body.” Unfortunately, we didn't document + // that same constraint for arbitrary RoundTripper implementations, and + // RoundTripper implementations in the wild (mostly in tests) assume that + // they can use a nil Body to mean an empty one (similar to Request.Body). + // (See https://golang.org/issue/38095.) + // + // If the ContentLength allows the Body to be empty, fill in an empty one + // here to ensure that it is non-nil. 
+ if resp.ContentLength > 0 && req.Method != "HEAD" { + return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with content length %d but a nil Body", rt, resp.ContentLength) + } + resp.Body = io.NopCloser(strings.NewReader("")) + } + //if !deadline.IsZero() { + // resp.Body = &cancelTimerBody{ + // stop: stopTimer, + // rc: resp.Body, + // reqDidTimeout: didTimeout, + // } + //} + return resp, nil, nil +} + +// redirectBehavior describes what should happen when the +// client encounters a 3xx status code from the server. +func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) { + switch resp.StatusCode { + case 301, 302, 303: + redirectMethod = reqMethod + shouldRedirect = true + includeBody = false + + // RFC 2616 allowed automatic redirection only with GET and + // HEAD requests. RFC 7231 lifts this restriction, but we still + // restrict other methods to GET to maintain compatibility. + // See Issue 18570. + if reqMethod != "GET" && reqMethod != "HEAD" { + redirectMethod = "GET" + } + case 307, 308: + redirectMethod = reqMethod + shouldRedirect = true + includeBody = true + + if ireq.GetBody == nil && ireq.outgoingLength() != 0 { + // We had a request body, and 307/308 require + // re-sending it, but GetBody is not defined. So just + // return this response to the user instead of an + // error, like we did in Go 1.7 and earlier. + shouldRedirect = false + } + } + return redirectMethod, shouldRedirect, includeBody +} + +// outgoingLength reports the Content-Length of this outgoing (Client) request. +// It maps 0 into -1 (unknown) when the Body is non-nil. +func (r *Request) outgoingLength() int64 { + if r.Body == nil || r.Body == NoBody { + return 0 + } + if r.ContentLength != 0 { + return r.ContentLength + } + return -1 +} + +// urlErrorOp returns the (*url.Error).Op value to use for the +// provided (*Request).Method value. 
+func urlErrorOp(method string) string { + if method == "" { + return "Get" + } + if lowerMethod, ok := ToLower(method); ok { + return method[:1] + lowerMethod[1:] + } + return method +} + +func stripPassword(u *url.URL) string { + _, passSet := u.User.Password() + if passSet { + return strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1) + } + return u.String() +} + +// See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt +// "To receive authorization, the client sends the userid and password, +// separated by a single colon (":") character, within a base64 +// encoded string in the credentials." +// It is not meant to be urlencoded. +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func (c *Client) deadline() time.Time { + if c.Timeout > 0 { + return time.Now().Add(c.Timeout) + } + return time.Time{} +} + +// cancelTimerBody is an io.ReadCloser that wraps rc with two features: +// 1. On Read error or close, the stop func is called. +// 2. On Read failure, if reqDidTimeout is true, the error is wrapped and +// marked as net.Error that hit its timeout. +type cancelTimerBody struct { + stop func() // stops the time.Timer waiting to cancel the request + rc io.ReadCloser + reqDidTimeout func() bool +} + +func (b *cancelTimerBody) Read(p []byte) (n int, err error) { + n, err = b.rc.Read(p) + if err == nil { + return n, nil + } + if err == io.EOF { + return n, err + } + if b.reqDidTimeout() { + err = &httpError{ + err: err.Error() + " (Client.Timeout or context cancellation while reading body)", + timeout: true, + } + } + return n, err +} + +func (b *cancelTimerBody) Close() error { + err := b.rc.Close() + b.stop() + return err +} + +//// setRequestCancel sets req.Cancel and adds a deadline context to req +//// if deadline is non-zero. 
The RoundTripper's type is used to +//// determine whether the legacy CancelRequest behavior should be used. +//// +//// As background, there are three ways to cancel a request: +//// First was Transport.CancelRequest. (deprecated) +//// Second was Request.Cancel. +//// Third was Request.Context. +//// This function populates the second and third, and uses the first if it really needs to. +//func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), didTimeout func() bool) { +// if deadline.IsZero() { +// return nop, alwaysFalse +// } +// knownTransport := knownRoundTripperImpl(rt, req) +// oldCtx := req.Context() +// +// if req.Cancel == nil && knownTransport { +// // If they already had a Request.Context that's +// // expiring sooner, do nothing: +// if !timeBeforeContextDeadline(deadline, oldCtx) { +// return nop, alwaysFalse +// } +// +// var cancelCtx func() +// req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline) +// return cancelCtx, func() bool { return time.Now().After(deadline) } +// } +// initialReqCancel := req.Cancel // the user's original Request.Cancel, if any +// +// var cancelCtx func() +// if timeBeforeContextDeadline(deadline, oldCtx) { +// req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline) +// } +// +// cancel := make(chan struct{}) +// req.Cancel = cancel +// +// doCancel := func() { +// // The second way in the func comment above: +// close(cancel) +// // The first way, used only for RoundTripper +// // implementations written before Go 1.5 or Go 1.6. 
+// type canceler interface{ CancelRequest(*Request) } +// if v, ok := rt.(canceler); ok { +// v.CancelRequest(req) +// } +// } +// +// stopTimerCh := make(chan struct{}) +// var once sync.Once +// stopTimer = func() { +// once.Do(func() { +// close(stopTimerCh) +// if cancelCtx != nil { +// cancelCtx() +// } +// }) +// } +// +// timer := time.NewTimer(time.Until(deadline)) +// var timedOut atomic.Bool +// +// go func() { +// select { +// case <-initialReqCancel: +// doCancel() +// timer.Stop() +// case <-timer.C: +// timedOut.Store(true) +// doCancel() +// case <-stopTimerCh: +// timer.Stop() +// } +// }() +// +// return stopTimer, timedOut.Load +//} + +// timeBeforeContextDeadline reports whether the non-zero Time t is +// before ctx's deadline, if any. If ctx does not have a deadline, it +// always reports true (the deadline is considered infinite). +func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool { + d, ok := ctx.Deadline() + if !ok { + return true + } + return t.Before(d) +} + +// knownRoundTripperImpl reports whether rt is a RoundTripper that's +// maintained by the Go team and known to implement the latest +// optional semantics (notably contexts). The Request is used +// to check whether this particular request is using an alternate protocol, +// in which case we need to check the RoundTripper for that protocol. +func knownRoundTripperImpl(rt RoundTripper, req *Request) bool { + switch t := rt.(type) { + case *Transport: + if altRT := t.alternateRoundTripper(req); altRT != nil { + return knownRoundTripperImpl(altRT, req) + } + return true + //case *http2Transport, http2noDialH2RoundTripper: + // return true + } + // There's a very minor chance of a false positive with this. + // Instead of detecting our golang.org/x/net/http2.Transport, + // it might detect a Transport type in a different http2 + // package. 
But I know of none, and the only problem would be + // some temporarily leaked goroutines if the transport didn't + // support contexts. So this is a good enough heuristic: + if reflect.TypeOf(rt).String() == "*http2.Transport" { + return true + } + return false +} + +// makeHeadersCopier makes a function that copies headers from the +// initial Request, ireq. For every redirect, this function must be called +// so that it can copy headers into the upcoming Request. +func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) { + // The headers to copy are from the very initial request. + // We use a closured callback to keep a reference to these original headers. + var ( + ireqhdr = cloneOrMakeHeader(ireq.Header) + icookies map[string][]*Cookie + ) + if c.Jar != nil && ireq.Header.Get("Cookie") != "" { + icookies = make(map[string][]*Cookie) + for _, c := range ireq.Cookies() { + icookies[c.Name] = append(icookies[c.Name], c) + } + } + + preq := ireq // The previous request + return func(req *Request) { + // If Jar is present and there was some initial cookies provided + // via the request header, then we may need to alter the initial + // cookies as we follow redirects since each redirect may end up + // modifying a pre-existing cookie. + // + // Since cookies already set in the request header do not contain + // information about the original domain and path, the logic below + // assumes any new set cookies override the original cookie + // regardless of domain or path. 
+ // + // See https://golang.org/issue/17494 + if c.Jar != nil && icookies != nil { + var changed bool + resp := req.Response // The response that caused the upcoming redirect + for _, c := range resp.Cookies() { + if _, ok := icookies[c.Name]; ok { + delete(icookies, c.Name) + changed = true + } + } + if changed { + ireqhdr.Del("Cookie") + var ss []string + for _, cs := range icookies { + for _, c := range cs { + ss = append(ss, c.Name+"="+c.Value) + } + } + sort.Strings(ss) // Ensure deterministic headers + ireqhdr.Set("Cookie", strings.Join(ss, "; ")) + } + } + + // Copy the initial request's Header values + // (at least the safe ones). + for k, vv := range ireqhdr { + if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) { + req.Header[k] = vv + } + } + + preq = req // Update previous Request with the current request + } +} + +func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool { + switch CanonicalHeaderKey(headerKey) { + case "Authorization", "Www-Authenticate", "Cookie", "Cookie2": + // Permit sending auth/cookie headers from "foo.com" + // to "sub.foo.com". + + // Note that we don't send all cookies to subdomains + // automatically. This function is only used for + // Cookies set explicitly on the initial outgoing + // client request. Cookies automatically added via the + // CookieJar mechanism continue to follow each + // cookie's scope as set by Set-Cookie. But for + // outgoing requests with the Cookie header set + // directly, we don't know their scope, so we assume + // it's for *.domain.com. + + ihost := idnaASCIIFromURL(initial) + dhost := idnaASCIIFromURL(dest) + return isDomainOrSubdomain(dhost, ihost) + } + // All other headers are copied: + return true +} + +// isDomainOrSubdomain reports whether sub is a subdomain (or exact +// match) of the parent domain. +// +// Both domains must already be in canonical form. 
+func isDomainOrSubdomain(sub, parent string) bool { + if sub == parent { + return true + } + // If sub is "foo.example.com" and parent is "example.com", + // that means sub must end in "."+parent. + // Do it without allocating. + if !strings.HasSuffix(sub, parent) { + return false + } + return sub[len(sub)-len(parent)-1] == '.' +} + +// refererForURL returns a referer without any authentication info or +// an empty string if lastReq scheme is https and newReq scheme is http. +// If the referer was explicitly set, then it will continue to be used. +func refererForURL(lastReq, newReq *url.URL, explicitRef string) string { + // https://tools.ietf.org/html/rfc7231#section-5.5.2 + // "Clients SHOULD NOT include a Referer header field in a + // (non-secure) HTTP request if the referring page was + // transferred with a secure protocol." + if lastReq.Scheme == "https" && newReq.Scheme == "http" { + return "" + } + if explicitRef != "" { + return explicitRef + } + + referer := lastReq.String() + if lastReq.User != nil { + // This is not very efficient, but is the best we can + // do without: + // - introducing a new method on URL + // - creating a race condition + // - copying the URL struct manually, which would cause + // maintenance problems down the line + auth := lastReq.User.String() + "@" + referer = strings.Replace(referer, auth, "", 1) + } + return referer +} + +// checkRedirect calls either the user's configured CheckRedirect +// function, or the default. 
+func (c *Client) checkRedirect(req *Request, via []*Request) error { + fn := c.CheckRedirect + if fn == nil { + fn = defaultCheckRedirect + } + return fn(req, via) +} + +func defaultCheckRedirect(req *Request, via []*Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + return nil +} diff --git a/x/net/http/clone.go b/x/net/http/clone.go new file mode 100644 index 0000000..ff67949 --- /dev/null +++ b/x/net/http/clone.go @@ -0,0 +1,11 @@ +package http + +// cloneOrMakeHeader invokes Header.Clone but if the +// result is nil, it'll instead make and return a non-nil Header. +func cloneOrMakeHeader(hdr Header) Header { + clone := hdr.Clone() + if clone == nil { + clone = make(Header) + } + return clone +} diff --git a/x/net/http/cookie.go b/x/net/http/cookie.go new file mode 100644 index 0000000..4b7175c --- /dev/null +++ b/x/net/http/cookie.go @@ -0,0 +1,232 @@ +package http + +import ( + "log" + "net/textproto" + "strconv" + "strings" + "time" +) + +// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an +// HTTP response or the Cookie header of an HTTP request. +// +// See https://tools.ietf.org/html/rfc6265 for details. +type Cookie struct { + Name string + Value string + + Path string // optional + Domain string // optional + Expires time.Time // optional + RawExpires string // for reading cookies only + + // MaxAge=0 means no 'Max-Age' attribute specified. + // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + // MaxAge>0 means Max-Age attribute present and given in seconds + MaxAge int + Secure bool + HttpOnly bool + SameSite SameSite + Raw string + Unparsed []string // Raw text of unparsed attribute-value pairs +} + +// SameSite allows a server to define a cookie attribute making it impossible for +// the browser to send this cookie along with cross-site requests. 
The main +// goal is to mitigate the risk of cross-origin information leakage, and provide +// some protection against cross-site request forgery attacks. +// +// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details. +type SameSite int + +const ( + SameSiteDefaultMode SameSite = iota + 1 + SameSiteLaxMode + SameSiteStrictMode + SameSiteNoneMode +) + +var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-") + +func sanitizeCookieName(n string) string { + return cookieNameSanitizer.Replace(n) +} + +// sanitizeCookieValue produces a suitable cookie-value from v. +// https://tools.ietf.org/html/rfc6265#section-4.1.1 +// +// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE ) +// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E +// ; US-ASCII characters excluding CTLs, +// ; whitespace DQUOTE, comma, semicolon, +// ; and backslash +// +// We loosen this as spaces and commas are common in cookie values +// but we produce a quoted cookie-value if and only if v contains +// commas or spaces. +// See https://golang.org/issue/7243 for the discussion. 
+func sanitizeCookieValue(v string) string { + v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v) + if len(v) == 0 { + return v + } + if strings.ContainsAny(v, " ,") { + return `"` + v + `"` + } + return v +} + +func validCookieValueByte(b byte) bool { + return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\' +} + +func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string { + ok := true + for i := 0; i < len(v); i++ { + if valid(v[i]) { + continue + } + log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName) + ok = false + break + } + if ok { + return v + } + buf := make([]byte, 0, len(v)) + for i := 0; i < len(v); i++ { + if b := v[i]; valid(b) { + buf = append(buf, b) + } + } + return string(buf) +} + +// readSetCookies parses all "Set-Cookie" values from +// the header h and returns the successfully parsed Cookies. +func readSetCookies(h Header) []*Cookie { + cookieCount := len(h["Set-Cookie"]) + if cookieCount == 0 { + return []*Cookie{} + } + cookies := make([]*Cookie, 0, cookieCount) + for _, line := range h["Set-Cookie"] { + parts := strings.Split(textproto.TrimString(line), ";") + if len(parts) == 1 && parts[0] == "" { + continue + } + parts[0] = textproto.TrimString(parts[0]) + name, value, ok := strings.Cut(parts[0], "=") + if !ok { + continue + } + name = textproto.TrimString(name) + if !isCookieNameValid(name) { + continue + } + value, ok = parseCookieValue(value, true) + if !ok { + continue + } + c := &Cookie{ + Name: name, + Value: value, + Raw: line, + } + for i := 1; i < len(parts); i++ { + parts[i] = textproto.TrimString(parts[i]) + if len(parts[i]) == 0 { + continue + } + + attr, val, _ := strings.Cut(parts[i], "=") + lowerAttr, isASCII := ToLower(attr) + if !isASCII { + continue + } + val, ok = parseCookieValue(val, false) + if !ok { + c.Unparsed = append(c.Unparsed, parts[i]) + continue + } + + switch lowerAttr { + case "samesite": + lowerVal, ascii := ToLower(val) + if !ascii 
{ + c.SameSite = SameSiteDefaultMode + continue + } + switch lowerVal { + case "lax": + c.SameSite = SameSiteLaxMode + case "strict": + c.SameSite = SameSiteStrictMode + case "none": + c.SameSite = SameSiteNoneMode + default: + c.SameSite = SameSiteDefaultMode + } + continue + case "secure": + c.Secure = true + continue + case "httponly": + c.HttpOnly = true + continue + case "domain": + c.Domain = val + continue + case "max-age": + secs, err := strconv.Atoi(val) + if err != nil || secs != 0 && val[0] == '0' { + break + } + if secs <= 0 { + secs = -1 + } + c.MaxAge = secs + continue + case "expires": + c.RawExpires = val + exptime, err := time.Parse(time.RFC1123, val) + if err != nil { + exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) + if err != nil { + c.Expires = time.Time{} + break + } + } + c.Expires = exptime.UTC() + continue + case "path": + c.Path = val + continue + } + c.Unparsed = append(c.Unparsed, parts[i]) + } + cookies = append(cookies, c) + } + return cookies +} + +func isCookieNameValid(raw string) bool { + if raw == "" { + return false + } + return strings.IndexFunc(raw, isNotToken) < 0 +} + +func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) { + // Strip the quotes, if present. + if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { + raw = raw[1 : len(raw)-1] + } + for i := 0; i < len(raw); i++ { + if !validCookieValueByte(raw[i]) { + return "", false + } + } + return raw, true +} diff --git a/x/net/http/header.go b/x/net/http/header.go new file mode 100644 index 0000000..7c95411 --- /dev/null +++ b/x/net/http/header.go @@ -0,0 +1,253 @@ +package http + +import ( + "fmt" + "net/textproto" + "sort" + "strings" + "sync" + + "github.com/goplus/llgo/c" + "github.com/goplus/llgoexamples/rust/hyper" +) + +// A Header represents the key-value pairs in an HTTP header. +// +// The keys should be in canonical form, as returned by +// CanonicalHeaderKey. 
+type Header map[string][]string + +// Add adds the key, value pair to the header. +// It appends to any existing values associated with key. +// The key is case insensitive; it is canonicalized by +// CanonicalHeaderKey. +func (h Header) Add(key, value string) { + textproto.MIMEHeader(h).Add(key, value) +} + +// Set sets the header entries associated with key to the +// single element value. It replaces any existing values +// associated with key. The key is case insensitive; it is +// canonicalized by textproto.CanonicalMIMEHeaderKey. +// To use non-canonical keys, assign to the map directly. +func (h Header) Set(key, value string) { + textproto.MIMEHeader(h).Set(key, value) +} + +// Get gets the first value associated with the given key. If +// there are no values associated with the key, Get returns "". +// It is case insensitive; textproto.CanonicalMIMEHeaderKey is +// used to canonicalize the provided key. Get assumes that all +// keys are stored in canonical form. To use non-canonical keys, +// access the map directly. +func (h Header) Get(key string) string { + return textproto.MIMEHeader(h).Get(key) +} + +// Values returns all values associated with the given key. +// It is case insensitive; textproto.CanonicalMIMEHeaderKey is +// used to canonicalize the provided key. To use non-canonical +// keys, access the map directly. +// The returned slice is not a copy. +func (h Header) Values(key string) []string { + return textproto.MIMEHeader(h).Values(key) +} + +// get is like Get, but key must already be in CanonicalHeaderKey form. +func (h Header) get(key string) string { + if v := h[key]; len(v) > 0 { + return v[0] + } + return "" +} + +// has reports whether h has the provided key defined, even if it's +// set to 0-length slice. +func (h Header) has(key string) bool { + _, ok := h[key] + return ok +} + +// Del deletes the values associated with key. +// The key is case insensitive; it is canonicalized by +// CanonicalHeaderKey. 
+func (h Header) Del(key string) { + textproto.MIMEHeader(h).Del(key) +} + +// Clone returns a copy of h or nil if h is nil. +func (h Header) Clone() Header { + if h == nil { + return nil + } + + // Find total number of values. + nv := 0 + for _, vv := range h { + nv += len(vv) + } + sv := make([]string, nv) // shared backing array for headers' values + h2 := make(Header, len(h)) + for k, vv := range h { + if vv == nil { + // Preserve nil values. ReverseProxy distinguishes + // between nil and zero-length header values. + h2[k] = nil + continue + } + n := copy(sv, vv) + h2[k] = sv[:n:n] + sv = sv[n:] + } + return h2 +} + +// sortedKeyValues returns h's keys sorted in the returned kvs +// slice. The headerSorter used to sort is also returned, for possible +// return to headerSorterCache. +func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) { + hs = headerSorterPool.Get().(*headerSorter) + if cap(hs.kvs) < len(h) { + hs.kvs = make([]keyValues, 0, len(h)) + } + kvs = hs.kvs[:0] + for k, vv := range h { + if !exclude[k] { + kvs = append(kvs, keyValues{k, vv}) + } + } + hs.kvs = kvs + sort.Sort(hs) + return kvs, hs +} + +// Write writes a header in wire format. +func (h Header) Write(reqHeaders *hyper.Headers) error { + return h.write(reqHeaders) +} + +func (h Header) write(reqHeaders *hyper.Headers) error { + return h.writeSubset(reqHeaders, nil) +} + +// WriteSubset writes a header in wire format. +// If exclude is not nil, keys where exclude[key] == true are not written. +// Keys are not canonicalized before checking the exclude map. +func (h Header) WriteSubset(reqHeaders *hyper.Headers, exclude map[string]bool) error { + return h.writeSubset(reqHeaders, exclude) +} + +func (h Header) writeSubset(reqHeaders *hyper.Headers, exclude map[string]bool) error { + kvs, sorter := h.sortedKeyValues(exclude) + for _, kv := range kvs { + if !ValidHeaderFieldName(kv.key) { + // This could be an error. 
In the common case of + // writing response headers, however, we have no good + // way to provide the error back to the server + // handler, so just drop invalid headers instead. + continue + } + for _, v := range kv.values { + v = headerNewlineToSpace.Replace(v) + v = textproto.TrimString(v) + if reqHeaders.Add(&[]byte(kv.key)[0], c.Strlen(c.AllocaCStr(kv.key)), &[]byte(v)[0], c.Strlen(c.AllocaCStr(v))) != hyper.OK { + headerSorterPool.Put(sorter) + return fmt.Errorf("error adding header %s: %s\n", kv.key, v) + } + //if trace != nil && trace.WroteHeaderField != nil { + // formattedVals = append(formattedVals, v) + //} + } + //if trace != nil && trace.WroteHeaderField != nil { + // trace.WroteHeaderField(kv.key, formattedVals) + // formattedVals = nil + //} + } + + headerSorterPool.Put(sorter) + return nil +} + +var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") + +type keyValues struct { + key string + values []string +} + +// A headerSorter implements sort.Interface by sorting a []keyValues +// by key. It's used as a pointer, so it can fit in a sort.Interface +// interface value without allocation. +type headerSorter struct { + kvs []keyValues +} + +func (s *headerSorter) Len() int { return len(s.kvs) } +func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } +func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key } + +var headerSorterPool = sync.Pool{ + New: func() any { return new(headerSorter) }, +} + +// CanonicalHeaderKey returns the canonical format of the +// header key s. The canonicalization converts the first +// letter and any letter following a hyphen to upper case; +// the rest are converted to lowercase. For example, the +// canonical key for "accept-encoding" is "Accept-Encoding". +// If s contains a space or invalid header field bytes, it is +// returned without modifications. 
+func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
+
+// hasToken reports whether token appears within v, ASCII
+// case-insensitive, with space or comma boundaries.
+// token must be all lowercase.
+// v may contain mixed case.
+func hasToken(v, token string) bool {
+	if len(token) > len(v) || token == "" {
+		return false
+	}
+	if v == token {
+		return true
+	}
+	for sp := 0; sp <= len(v)-len(token); sp++ {
+		// Check that first character is good.
+		// The token is ASCII, so checking only a single byte
+		// is sufficient. We skip this potential starting
+		// position if both the first byte and its potential
+		// ASCII uppercase equivalent (b|0x20) don't match.
+		// False positives ('^' => '~') are caught by EqualFold.
+		if b := v[sp]; b != token[0] && b|0x20 != token[0] {
+			continue
+		}
+		// Check that start pos is on a valid token boundary.
+		if sp > 0 && !isTokenBoundary(v[sp-1]) {
+			continue
+		}
+		// Check that end pos is on a valid token boundary.
+ if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) { + continue + } + if EqualFold(v[sp:sp+len(token)], token) { + return true + } + } + return false +} + +func isTokenBoundary(b byte) bool { + return b == ' ' || b == ',' || b == '\t' +} + +// appendToResponseHeader (HeadersForEachCallback) prints each header to the console +func appendToResponseHeader(userdata c.Pointer, name *uint8, nameLen uintptr, value *uint8, valueLen uintptr) c.Int { + resp := (*Response)(userdata) + nameStr := c.GoString((*int8)(c.Pointer(name)), nameLen) + valueStr := c.GoString((*int8)(c.Pointer(value)), valueLen) + + if resp.Header == nil { + resp.Header = make(Header) + } + resp.Header.Add(nameStr, valueStr) + return hyper.IterContinue +} diff --git a/x/net/http/http.go b/x/net/http/http.go new file mode 100644 index 0000000..2f5d5ab --- /dev/null +++ b/x/net/http/http.go @@ -0,0 +1,38 @@ +package http + +import "strings" + +// splitTwoDigitNumber splits a two-digit number into two digits. +func splitTwoDigitNumber(num int) (int, int) { + tens := num / 10 + ones := num % 10 + return tens, ones +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// stringContainsCTLByte reports whether s contains any ASCII control character. +func stringContainsCTLByte(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b < ' ' || b == 0x7f { + return true + } + } + return false +} + +// removeEmptyPort strips the empty port in ":port" to "" +// as mandated by RFC 3986 Section 6.2.3. +func removeEmptyPort(host string) string { + if hasPort(host) { + return strings.TrimSuffix(host, ":") + } + return host +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. 
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } diff --git a/x/net/http/jar.go b/x/net/http/jar.go new file mode 100644 index 0000000..5c3de0d --- /dev/null +++ b/x/net/http/jar.go @@ -0,0 +1,27 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "net/url" +) + +// A CookieJar manages storage and use of cookies in HTTP requests. +// +// Implementations of CookieJar must be safe for concurrent use by multiple +// goroutines. +// +// The net/http/cookiejar package provides a CookieJar implementation. +type CookieJar interface { + // SetCookies handles the receipt of the cookies in a reply for the + // given URL. It may or may not choose to save the cookies, depending + // on the jar's policy and implementation. + SetCookies(u *url.URL, cookies []*Cookie) + + // Cookies returns the cookies to send in a request for the given URL. + // It is up to the implementation to honor the standard cookie use + // restrictions such as in RFC 6265. 
+ Cookies(u *url.URL) []*Cookie +} diff --git a/x/net/http/request.go b/x/net/http/request.go new file mode 100644 index 0000000..37d6408 --- /dev/null +++ b/x/net/http/request.go @@ -0,0 +1,490 @@ +package http + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/textproto" + "net/url" + "strings" + "time" + + "github.com/goplus/llgo/c/libuv" + "golang.org/x/net/idna" + + "github.com/goplus/llgo/c" + "github.com/goplus/llgoexamples/rust/hyper" +) + +type Request struct { + Method string + URL *url.URL + Proto string // "HTTP/1.0" + ProtoMajor int // 1 + ProtoMinor int // 0 + Header Header + Body io.ReadCloser + GetBody func() (io.ReadCloser, error) + ContentLength int64 + TransferEncoding []string + Close bool + Host string + // Form url.Values + // PostForm url.Values + // MultipartForm *multipart.Form + RemoteAddr string + RequestURI string + + Response *Response + + deadline time.Time + timeoutch chan struct{} + timer *libuv.Timer +} + +const defaultChunkSize = 8192 + +// NOTE: This is not intended to reflect the actual Go version being used. +// It was changed at the time of Go 1.1 release because the former User-Agent +// had ended up blocked by some intrusion detection systems. +// See https://codereview.appspot.com/7532043. +const defaultUserAgent = "Go-http-client/1.1" + +// errMissingHost is returned by Write when there is no Host or URL present in +// the Request. +var errMissingHost = errors.New("http: Request.Write on Request with no Host or URL set") + +// Headers that Request.Write handles itself and should be skipped. +var reqWriteExcludeHeader = map[string]bool{ + "Host": true, // not in Header map anyway + "User-Agent": true, + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// requestBodyReadError wraps an error from (*Request).write to indicate +// that the error came from a Read call on the Request.Body. +// This error type should not escape the net/http package to users. 
+type requestBodyReadError struct{ error } + +// NewRequest wraps NewRequestWithContext using context.Background. +func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { + // TODO(hah) Hyper only supports http + isHttpPrefix := strings.HasPrefix(urlStr, "http://") + isHttpsPrefix := strings.HasPrefix(urlStr, "https://") + if !isHttpPrefix && !isHttpsPrefix { + urlStr = "http://" + urlStr + } + if isHttpsPrefix { + urlStr = "http://" + strings.TrimPrefix(urlStr, "https://") + } + + if method == "" { + // We document that "" means "GET" for Request.Method, and people have + // relied on that from NewRequest, so keep that working. + // We still enforce validMethod for non-empty methods. + method = "GET" + } + if !validMethod(method) { + return nil, fmt.Errorf("net/http: invalid method %q", method) + } + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = io.NopCloser(body) + } + // The host's colon:port should be normalized. See Issue 14836. 
+ u.Host = removeEmptyPort(u.Host) + req := &Request{ + Method: method, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(Header), + Body: rc, + Host: u.Host, + timer: nil, + } + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + buf := v.Bytes() + req.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return io.NopCloser(r), nil + } + + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + snapshot := *v + req.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + case *strings.Reader: + req.ContentLength = int64(v.Len()) + snapshot := *v + req.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. 
+ if req.GetBody != nil && req.ContentLength == 0 { + req.Body = NoBody + req.GetBody = func() (io.ReadCloser, error) { return NoBody, nil } + } + } + + return req, nil +} + +func (r *Request) expectsContinue() bool { + return hasToken(r.Header.get("Expect"), "100-continue") +} + +func (r *Request) wantsClose() bool { + if r.Close { + return true + } + return hasToken(r.Header.get("Connection"), "close") +} + +func (r *Request) closeBody() error { + if r.Body == nil { + return nil + } + return r.Body.Close() +} + +func (r *Request) isReplayable() bool { + if r.Body == nil || r.Body == NoBody || r.GetBody != nil { + switch valueOrDefault(r.Method, "GET") { + case "GET", "HEAD", "OPTIONS", "TRACE": + return true + } + // The Idempotency-Key, while non-standard, is widely used to + // mean a POST or other request is idempotent. See + // https://golang.org/issue/19943#issuecomment-421092421 + if r.Header.has("Idempotency-Key") || r.Header.has("X-Idempotency-Key") { + return true + } + } + return false +} + +// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, +// AddCookie does not attach more than one Cookie header field. That +// means all cookies, if any, are written into the same line, +// separated by semicolon. +// AddCookie only sanitizes c's name and value, and does not sanitize +// a Cookie header already present in the request. +func (r *Request) AddCookie(c *Cookie) { + s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) + if c := r.Header.Get("Cookie"); c != "" { + r.Header.Set("Cookie", c+"; "+s) + } else { + r.Header.Set("Cookie", s) + } +} + +// requiresHTTP1 reports whether this request requires being sent on +// an HTTP/1 connection. +func (r *Request) requiresHTTP1() bool { + return hasToken(r.Header.Get("Connection"), "upgrade") && + EqualFold(r.Header.Get("Upgrade"), "websocket") +} + +// Cookies parses and returns the HTTP cookies sent with the request. 
+func (r *Request) Cookies() []*Cookie { + return readCookies(r.Header, "") +} + +// ProtoAtLeast reports whether the HTTP protocol used +// in the request is at least major.minor. +func (r *Request) ProtoAtLeast(major, minor int) bool { + return r.ProtoMajor > major || + r.ProtoMajor == major && r.ProtoMinor >= minor +} + +// extraHeaders may be nil +// waitForContinue may be nil +// always closes body +func (r *Request) write(client *hyper.ClientConn, taskData *taskData, exec *hyper.Executor) (err error) { + //trace := httptrace.ContextClientTrace(r.Context()) + //if trace != nil && trace.WroteRequest != nil { + // defer func() { + // trace.WroteRequest(httptrace.WroteRequestInfo{ + // Err: err, + // }) + // }() + //} + + // Prepare the hyper.Request + hyperReq, err := r.newHyperRequest(taskData.pc.isProxy, taskData.req.extra, taskData.req) + if err != nil { + return err + } + // Send it! + sendTask := client.Send(hyperReq) + if sendTask == nil { + println("############### write: sendTask is nil") + return errors.New("failed to send the request") + } + sendTask.SetUserdata(c.Pointer(taskData), nil) + sendRes := exec.Push(sendTask) + if sendRes != hyper.OK { + err = errors.New("failed to send the request") + } + return err +} + +func (r *Request) newHyperRequest(usingProxy bool, extraHeader Header, treq *transportRequest) (*hyper.Request, error) { + // Find the target host. Prefer the Host: header, but if that + // is not given, use the host from the request URL. + // + // Clean the host, in case it arrives with unexpected stuff in it. + host := r.Host + if host == "" { + if r.URL == nil { + return nil, errMissingHost + } + host = r.URL.Host + } + host, err := PunycodeHostPort(host) + if err != nil { + return nil, err + } + // Validate that the Host header is a valid header in general, + // but don't validate the host itself. This is sufficient to avoid + // header or request smuggling via the Host field. 
+ // The server can (and will, if it's a net/http server) reject + // the request if it doesn't consider the host valid. + if !ValidHostHeader(host) { + // Historically, we would truncate the Host header after '/' or ' '. + // Some users have relied on this truncation to convert a network + // address such as Unix domain socket path into a valid, ignored + // Host header (see https://go.dev/issue/61431). + // + // We don't preserve the truncation, because sending an altered + // header field opens a smuggling vector. Instead, zero out the + // Host header entirely if it isn't valid. (An empty Host is valid; + // see RFC 9112 Section 3.2.) + // + // Return an error if we're sending to a proxy, since the proxy + // probably can't do anything useful with an empty Host header. + if !usingProxy { + host = "" + } else { + return nil, errors.New("http: invalid Host header") + } + } + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + host = removeZone(host) + + ruri := r.URL.RequestURI() + if usingProxy && r.URL.Scheme != "" && r.URL.Opaque == "" { + ruri = r.URL.Scheme + "://" + host + ruri + } else if r.Method == "CONNECT" && r.URL.Path == "" { + // CONNECT requests normally give just the host and port, not a full URL. 
+ ruri = host + if r.URL.Opaque != "" { + ruri = r.URL.Opaque + } + } + if stringContainsCTLByte(ruri) { + return nil, errors.New("net/http: can't write control character in Request.URL") + } + + // Prepare the hyper request + hyperReq := hyper.NewRequest() + + // Set the request line, default HTTP/1.1 + if hyperReq.SetMethod(&[]byte(r.Method)[0], c.Strlen(c.AllocaCStr(r.Method))) != hyper.OK { + return nil, fmt.Errorf("error setting method %s\n", r.Method) + } + if hyperReq.SetURI(&[]byte(ruri)[0], c.Strlen(c.AllocaCStr(ruri))) != hyper.OK { + return nil, fmt.Errorf("error setting uri %s\n", ruri) + } + if hyperReq.SetVersion(c.Int(hyper.HTTPVersion11)) != hyper.OK { + return nil, fmt.Errorf("error setting httpversion %s\n", "HTTP/1.1") + } + + // Set the request headers + reqHeaders := hyperReq.Headers() + if reqHeaders.Set(&[]byte("Host")[0], c.Strlen(c.Str("Host")), &[]byte(host)[0], c.Strlen(c.AllocaCStr(host))) != hyper.OK { + return nil, fmt.Errorf("error setting header: Host: %s\n", host) + } + + // Use the defaultUserAgent unless the Header contains one, which + // may be blank to not send the header. + userAgent := defaultUserAgent + if r.Header.has("User-Agent") { + userAgent = r.Header.Get("User-Agent") + } + if userAgent != "" { + if reqHeaders.Set(&[]byte("User-Agent")[0], c.Strlen(c.Str("User-Agent")), &[]byte(userAgent)[0], c.Strlen(c.AllocaCStr(userAgent))) != hyper.OK { + return nil, fmt.Errorf("error setting header: User-Agent: %s\n", userAgent) + } + } + + // Process Body,ContentLength,Close,Trailer + err = r.writeHeader(reqHeaders) + if err != nil { + return nil, err + } + + err = r.Header.writeSubset(reqHeaders, reqWriteExcludeHeader) + if err != nil { + return nil, err + } + + if extraHeader != nil { + err = extraHeader.write(reqHeaders) + if err != nil { + return nil, err + } + } + + //if trace != nil && trace.WroteHeaders != nil { + // trace.WroteHeaders() + //} + + // Wait for 100-continue if expected. 
	// Register hyper's callback for informational (1xx) responses: only
	// HTTP/1.1+ requests that sent an Expect: 100-continue need them.
	if r.ProtoAtLeast(1, 1) && r.Body != nil && r.expectsContinue() {
		hyperReq.OnInformational(printInformational, nil, nil)
	}

	// Write body and trailer
	err = r.writeBody(hyperReq, treq)
	if err != nil {
		return nil, err
	}

	return hyperReq, nil
}

// printInformational is the hyper OnInformational callback; it logs the
// status code of each informational (1xx) response as it arrives.
func printInformational(userdata c.Pointer, resp *hyper.Response) {
	status := resp.Status()
	fmt.Println("Informational (1xx): ", status)
}

// validMethod reports whether method is a non-empty RFC 2616 token and
// therefore a syntactically valid HTTP method name.
func validMethod(method string) bool {
	/*
	     Method         = "OPTIONS"                ; Section 9.2
	                    | "GET"                    ; Section 9.3
	                    | "HEAD"                   ; Section 9.4
	                    | "POST"                   ; Section 9.5
	                    | "PUT"                    ; Section 9.6
	                    | "DELETE"                 ; Section 9.7
	                    | "TRACE"                  ; Section 9.8
	                    | "CONNECT"                ; Section 9.9
	                    | extension-method
	   extension-method = token
	     token          = 1*
	*/
	return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
}

// readCookies parses all "Cookie" values from the header h and
// returns the successfully parsed Cookies.
//
// if filter isn't empty, only cookies of that name are returned.
func readCookies(h Header, filter string) []*Cookie {
	lines := h["Cookie"]
	if len(lines) == 0 {
		return []*Cookie{}
	}

	cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";"))
	for _, line := range lines {
		line = textproto.TrimString(line)

		var part string
		for len(line) > 0 { // continue since we have rest
			part, line, _ = strings.Cut(line, ";")
			part = textproto.TrimString(part)
			if part == "" {
				continue
			}
			name, val, _ := strings.Cut(part, "=")
			name = textproto.TrimString(name)
			if !isCookieNameValid(name) {
				continue
			}
			if filter != "" && filter != name {
				continue
			}
			// Cookie values may be quoted; parseCookieValue unquotes and
			// validates the bytes (true allows double quotes).
			val, ok := parseCookieValue(val, true)
			if !ok {
				continue
			}
			cookies = append(cookies, &Cookie{Name: name, Value: val})
		}
	}
	return cookies
}

// idnaASCII converts host v to its ASCII (punycode) form for the wire.
func idnaASCII(v string) (string, error) {
	// TODO: Consider removing this check after verifying performance is okay.
	// Right now punycode verification, length checks, context checks, and the
	// permissible character tests are all omitted. It also prevents the ToASCII
	// call from salvaging an invalid IDN, when possible. As a result it may be
	// possible to have two IDNs that appear identical to the user where the
	// ASCII-only version causes an error downstream whereas the non-ASCII
	// version does not.
	// Note that for correct ASCII IDNs ToASCII will only do considerably more
	// work, but it will not cause an allocation.
	if Is(v) {
		return v, nil
	}
	return idna.Lookup.ToASCII(v)
}

// removeZone removes IPv6 zone identifier from host.
// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
func removeZone(host string) string {
	if !strings.HasPrefix(host, "[") {
		return host
	}
	i := strings.LastIndex(host, "]")
	if i < 0 {
		return host
	}
	j := strings.LastIndex(host[:i], "%")
	if j < 0 {
		return host
	}
	return host[:j] + host[i:]
}

// Return value if nonempty, def otherwise.
func valueOrDefault(value, def string) string {
	if value != "" {
		return value
	}
	return def
}

// ---- chunk boundary: the lines below belong to x/net/http/response.go ----

package http

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"strconv"
	"sync"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgoexamples/rust/hyper"
)

// Response mirrors the supported subset of net/http.Response for requests
// executed through the hyper-backed transport.
type Response struct {
	Status           string // e.g. "200 OK"
	StatusCode       int    // e.g. 200
	Proto            string // e.g. "HTTP/1.0"
	ProtoMajor       int    // e.g. 1
	ProtoMinor       int    // e.g. 0
	Header           Header
	Body             io.ReadCloser
	ContentLength    int64
	TransferEncoding []string
	Close            bool
	Uncompressed     bool
	//Trailer Header
	Request *Request
}

// closeBody closes the response body if present; safe when Body is nil.
func (r *Response) closeBody() {
	if r.Body != nil {
		r.Body.Close()
	}
}

// bodyIsWritable reports whether the Body supports writing.
// (continued) The Transport returns Writable bodies for 101 Switching
// Protocols responses.
// The Transport uses this method to determine whether a persistent
// connection is done being managed from its perspective. Once we
// return a writable response body to a user, the net/http package is
// done managing that connection.
func (r *Response) bodyIsWritable() bool {
	_, ok := r.Body.(io.Writer)
	return ok
}

// Cookies parses and returns the cookies set in the Set-Cookie headers.
func (r *Response) Cookies() []*Cookie {
	return readSetCookies(r.Header)
}

// checkRespBody decides, immediately after the response header arrives,
// whether any body follows. When there is none (or the body is writable,
// i.e. an upgrade), it delivers the response to the waiting roundTrip
// goroutine right away and returns true, meaning the read loop is done
// with this response; otherwise it returns false and the caller reads on.
func (r *Response) checkRespBody(taskData *taskData) (needContinue bool) {
	pc := taskData.pc
	bodyWritable := r.bodyIsWritable()
	hasBody := taskData.req.Method != "HEAD" && r.ContentLength != 0

	if r.Close || taskData.req.Close || r.StatusCode <= 199 || bodyWritable {
		// Don't do keep-alive on error if either party requested a close
		// or we get an unexpected informational (1xx) response.
		// StatusCode 100 is already handled above.
		pc.alive = false
	}

	if !hasBody || bodyWritable {
		// Deregister the canceler before the conn may be reused elsewhere.
		replaced := pc.t.replaceReqCanceler(taskData.req.cancelKey, nil)

		// Put the idle conn back into the pool before we send the response
		// so if they process it quickly and make another request, they'll
		// get this same conn. But we use the unbuffered channel 'rc'
		// to guarantee that persistConn.roundTrip got out of its select
		// potentially waiting for this persistConn to close.
		pc.alive = pc.alive &&
			replaced && pc.tryPutIdleConn()

		if bodyWritable {
			pc.closeErr = errCallerOwnsConn
		}

		select {
		case taskData.resc <- responseAndError{res: r}:
		case <-taskData.callerGone:
			if debugSwitch {
				println("############### checkRespBody callerGone")
			}
			closeAndRemoveIdleConn(pc, true)
			return true
		}
		// Now that they've read from the unbuffered channel, they're safely
		// out of the select that also waits on this goroutine to die, so
		// we're allowed to exit now if needed (if alive is false)
		if debugSwitch {
			println("############### checkRespBody return")
		}
		closeAndRemoveIdleConn(pc, false)
		return true
	}
	return false
}

// wrapRespBody wraps the raw response body in a bodyEOFSignal so the
// transport runs its recycle/teardown hooks exactly once — either at
// body EOF or at an early Close.
func (r *Response) wrapRespBody(taskData *taskData) {
	body := &bodyEOFSignal{
		body: r.Body,
		earlyCloseFn: func() error {
			// If the response body is closed prematurely,
			// the hyperBody needs to be recycled and the persistConn needs to be handled.
			taskData.closeHyperBody()
			select {
			case <-taskData.pc.closech:
				taskData.pc.t.removeIdleConn(taskData.pc)
			default:
			}
			replaced := taskData.pc.t.replaceReqCanceler(taskData.req.cancelKey, nil) // before pc might return to idle pool
			taskData.pc.alive = taskData.pc.alive &&
				replaced && taskData.pc.tryPutIdleConn()
			return nil
		},
		fn: func(err error) error {
			isEOF := err == io.EOF
			if !isEOF {
				// Surface a cancellation error in preference to the read error.
				if cerr := taskData.pc.canceled(); cerr != nil {
					return cerr
				}
			}
			return err
		},
	}
	r.Body = body
	// TODO(hah) gzip(wrapRespBody): The compress/gzip library still has a bug. An exception occurs when calling gzip.NewReader().
	//if taskData.addedGzip && EqualFold(r.Header.Get("Content-Encoding"), "gzip") {
	//	println("gzip reader")
	//	r.Body = &gzipReader{body: body}
	//	r.Header.Del("Content-Encoding")
	//	r.Header.Del("Content-Length")
	//	r.ContentLength = -1
	//	r.Uncompressed = true
	//}
}

// bodyEOFSignal is used by the HTTP/1 transport when reading response
// bodies to make sure we see the end of a response body before
// proceeding and reading on the connection again.
//
// It wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before its final (error-producing) Read or Close call
// returns. fn should return the new error to return from Read or Close.
//
// If earlyCloseFn is non-nil and Close is called before io.EOF is
// seen, earlyCloseFn is called instead of fn, and its return value is
// the return value from Close.
type bodyEOFSignal struct {
	body         io.ReadCloser
	mu           sync.Mutex        // guards following 4 fields
	closed       bool              // whether Close has been called
	rerr         error             // sticky Read error
	fn           func(error) error // err will be nil on Read io.EOF
	earlyCloseFn func() error      // optional alt Close func used if io.EOF not seen
}

var errReadOnClosedResBody = errors.New("http: read on closed response body")

// Read forwards to the wrapped body, latching the first error seen and
// running fn exactly once when that error first surfaces.
func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
	es.mu.Lock()
	closed, rerr := es.closed, es.rerr
	es.mu.Unlock()
	if closed {
		return 0, errReadOnClosedResBody
	}
	if rerr != nil {
		return 0, rerr
	}

	n, err = es.body.Read(p)
	if err != nil {
		es.mu.Lock()
		defer es.mu.Unlock()
		if es.rerr == nil {
			es.rerr = err
		}
		err = es.condfn(err)
	}
	return
}

// Close runs earlyCloseFn if EOF was never observed; otherwise it closes
// the wrapped body and runs fn on the result. Idempotent.
func (es *bodyEOFSignal) Close() error {
	es.mu.Lock()
	defer es.mu.Unlock()
	if es.closed {
		return nil
	}
	es.closed = true
	if es.earlyCloseFn != nil && es.rerr != io.EOF {
		return es.earlyCloseFn()
	}
	err := es.body.Close()
	return es.condfn(err)
}

// caller must hold es.mu.
+func (es *bodyEOFSignal) condfn(err error) error { + if es.fn == nil { + return err + } + err = es.fn(err) + es.fn = nil + return err +} + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + _ incomparable + body *bodyEOFSignal // underlying HTTP/1 response body framing + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // any error from gzip.NewReader; sticky +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zr == nil { + if gz.zerr == nil { + gz.zr, gz.zerr = gzip.NewReader(gz.body) + } + if gz.zerr != nil { + return 0, gz.zerr + } + } + + gz.body.mu.Lock() + if gz.body.closed { + err = errReadOnClosedResBody + } + gz.body.mu.Unlock() + + if err != nil { + return 0, err + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +func ReadResponse(r io.ReadCloser, req *Request, hyperResp *hyper.Response) (*Response, error) { + resp := &Response{ + Request: req, + Header: make(Header), + //Trailer: make(Header), + } + readResponseLineAndHeader(resp, hyperResp) + + fixPragmaCacheControl(req.Header) + + err := readTransfer(resp, r) + if err != nil { + return nil, err + } + return resp, nil +} + +// readResponseLineAndHeader reads the response line and header from hyper response. +func readResponseLineAndHeader(resp *Response, hyperResp *hyper.Response) { + rp := hyperResp.ReasonPhrase() + rpLen := hyperResp.ReasonPhraseLen() + + // Parse the first line of the response. 
+ resp.Status = strconv.Itoa(int(hyperResp.Status())) + " " + c.GoString((*int8)(c.Pointer(rp)), rpLen) + resp.StatusCode = int(hyperResp.Status()) + version := int(hyperResp.Version()) + resp.ProtoMajor, resp.ProtoMinor = splitTwoDigitNumber(version) + resp.Proto = fmt.Sprintf("HTTP/%d.%d", resp.ProtoMajor, resp.ProtoMinor) + + headers := hyperResp.Headers() + headers.Foreach(appendToResponseHeader, c.Pointer(resp)) +} + +// RFC 7234, section 5.4: Should treat +// +// Pragma: no-cache +// +// like +// +// Cache-Control: no-cache +func fixPragmaCacheControl(header Header) { + if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { + if _, presentcc := header["Cache-Control"]; !presentcc { + header["Cache-Control"] = []string{"no-cache"} + } + } +} + +// isProtocolSwitchHeader reports whether the request or response header +// is for a protocol switch. +func isProtocolSwitchHeader(h Header) bool { + return h.Get("Upgrade") != "" && + HeaderValuesContainsToken(h["Connection"], "Upgrade") +} diff --git a/x/net/http/server.go b/x/net/http/server.go new file mode 100644 index 0000000..f38cbd0 --- /dev/null +++ b/x/net/http/server.go @@ -0,0 +1,12 @@ +package http + +// maxPostHandlerReadBytes is the max number of Request.Body bytes not +// consumed by a handler that the server will read from the client +// in order to keep a connection alive. If there are more bytes than +// this then the server to be paranoid instead sends a "Connection: +// close" response. +// +// This number is approximately what a typical machine's TCP buffer +// size is anyway. 
(if we have the bytes on the machine, we might as +// well read them) +const maxPostHandlerReadBytes = 256 << 10 diff --git a/x/net/http/transfer.go b/x/net/http/transfer.go new file mode 100644 index 0000000..12f3d70 --- /dev/null +++ b/x/net/http/transfer.go @@ -0,0 +1,666 @@ +package http + +import ( + "errors" + "fmt" + "io" + "net/textproto" + "reflect" + "strconv" + "strings" + "sync" + "unicode/utf8" + + "github.com/goplus/llgo/c" + "github.com/goplus/llgo/c/os" + "github.com/goplus/llgoexamples/rust/hyper" +) + +type transferReader struct { + // Input + Header Header + StatusCode int + RequestMethod string + ProtoMajor int + ProtoMinor int + // Output + Body io.ReadCloser + ContentLength int64 + Chunked bool + Close bool +} + +// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header. +func (t *transferReader) parseTransferEncoding() error { + raw, present := t.Header["Transfer-Encoding"] + if !present { + return nil + } + delete(t.Header, "Transfer-Encoding") + + // Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests. + if !t.protoAtLeast(1, 1) { + return nil + } + + // Like nginx, we only support a single Transfer-Encoding header field, and + // only if set to "chunked". This is one of the most security sensitive + // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it + // strict and simple. + if len(raw) != 1 { + return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)} + } + if !EqualFold(raw[0], "chunked") { + return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])} + } + + // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field + // in any message that contains a Transfer-Encoding header field." + // + // but also: "If a message is received with both a Transfer-Encoding and a + // Content-Length header field, the Transfer-Encoding overrides the + // Content-Length. 
Such a message might indicate an attempt to perform + // request smuggling (Section 9.5) or response splitting (Section 9.4) and + // ought to be handled as an error. A sender MUST remove the received + // Content-Length field prior to forwarding such a message downstream." + // + // Reportedly, these appear in the wild. + delete(t.Header, "Content-Length") + + t.Chunked = true + return nil +} + +func (t *transferReader) protoAtLeast(m, n int) bool { + return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n) +} + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +var NoBody = noBody{} + +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// unsupportedTEError reports unsupported transfer-encodings. +type unsupportedTEError struct { + err string +} + +func (uste *unsupportedTEError) Error() string { + return uste.err +} + +// msg is *Request or *Response. 
// readTransfer decodes the transfer semantics (Transfer-Encoding,
// Content-Length, Connection) of msg and installs the appropriate body
// reader over r. msg is *Request or *Response.
func readTransfer(msg any, r io.ReadCloser) (err error) {
	t := &transferReader{RequestMethod: "GET"}

	// Unify input
	isResponse := false
	switch rr := msg.(type) {
	case *Response:
		t.Header = rr.Header
		t.StatusCode = rr.StatusCode
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
		isResponse = true
		if rr.Request != nil {
			t.RequestMethod = rr.Request.Method
		}
	case *Request:
		t.Header = rr.Header
		t.RequestMethod = rr.Method
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		// Transfer semantics for Requests are exactly like those for
		// Responses with status code 200, responding to a GET method
		t.StatusCode = 200
		t.Close = rr.Close
	default:
		panic("unexpected type")
	}

	// Default to HTTP/1.1
	if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
		t.ProtoMajor, t.ProtoMinor = 1, 1
	}

	// Transfer-Encoding: chunked, and overriding Content-Length.
	if err = t.parseTransferEncoding(); err != nil {
		return err
	}

	realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
	if err != nil {
		return err
	}
	// For a HEAD response the Content-Length describes the body the server
	// WOULD have sent; record it even though no body bytes follow.
	if isResponse && t.RequestMethod == "HEAD" {
		if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
			return err
		} else {
			t.ContentLength = n
		}
	} else {
		t.ContentLength = realLength
	}

	// If there is no Content-Length or chunked Transfer-Encoding on a *Response
	// and the status is not 1xx, 204 or 304, then the body is unbounded.
	// See RFC 7230, section 3.3.
	switch msg.(type) {
	case *Response:
		if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
			// Unbounded body.
			t.Close = true
		}
	}

	// Prepare body reader. ContentLength < 0 means chunked encoding
	// or close connection when finished, since multipart is not supported yet
	switch {
	case t.Chunked:
		if isResponse && noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
			t.Body = NoBody
		} else {
			t.Body = &body{src: r, closer: r, hdr: msg, r: r, closing: t.Close}
		}
	case realLength == 0:
		t.Body = NoBody
	case realLength > 0:
		t.Body = &body{src: io.LimitReader(r, realLength), closer: r, closing: t.Close}
	default:
		// realLength < 0, i.e. "Content-Length" not mentioned in header
		if t.Close {
			// Close semantics (i.e. HTTP/1.0)
			t.Body = &body{src: r, closer: r, closing: t.Close}
		} else {
			// Persistent connection (i.e. HTTP/1.1)
			t.Body = NoBody
		}
	}

	// Unify output
	switch rr := msg.(type) {
	case *Request:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		if t.Chunked {
			rr.TransferEncoding = []string{"chunked"}
		}
		rr.Close = t.Close
		//rr.Trailer = t.Trailer
	case *Response:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		if t.Chunked {
			rr.TransferEncoding = []string{"chunked"}
		}
		rr.Close = t.Close
		//rr.Trailer = t.Trailer
	}

	return nil
}

// Determine the expected body length, using RFC 7230 Section 3.3. This
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
	isRequest := !isResponse
	contentLens := header["Content-Length"]

	// Hardening against HTTP request smuggling
	if len(contentLens) > 1 {
		// Per RFC 7230 Section 3.3.2, prevent multiple
		// Content-Length headers if they differ in value.
		// If there are dups of the value, remove the dups.
		// See Issue 16490.
		first := textproto.TrimString(contentLens[0])
		for _, ct := range contentLens[1:] {
			if first != textproto.TrimString(ct) {
				return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
			}
		}

		// deduplicate Content-Length
		header.Del("Content-Length")
		header.Add("Content-Length", first)

		contentLens = header["Content-Length"]
	}

	// Logic based on response type or status
	if isResponse && noResponseBodyExpected(requestMethod) {
		return 0, nil
	}
	if status/100 == 1 {
		return 0, nil
	}
	switch status {
	case 204, 304:
		return 0, nil
	}

	// Logic based on Transfer-Encoding
	if chunked {
		return -1, nil
	}

	// Logic based on Content-Length
	var cl string
	if len(contentLens) == 1 {
		cl = textproto.TrimString(contentLens[0])
	}
	if cl != "" {
		n, err := parseContentLength(cl)
		if err != nil {
			return -1, err
		}
		return n, nil
	}
	// No usable Content-Length: remove any empty/absent header value.
	header.Del("Content-Length")

	if isRequest {
		// RFC 7230 neither explicitly permits nor forbids an
		// entity-body on a GET request so we permit one if
		// declared, but we default to 0 here (not -1 below)
		// if there's no mention of a body.
		// Likewise, all other request methods are assumed to have
		// no body if neither Transfer-Encoding chunked nor a
		// Content-Length are set.
		return 0, nil
	}

	// Body-EOF logic based on other methods (like closing, or chunked coding)
	return -1, nil
}

// parseContentLength trims whitespace from s and returns -1 if no value
// is set, or the value if it's >= 0.
func parseContentLength(cl string) (int64, error) {
	cl = textproto.TrimString(cl)
	if cl == "" {
		return -1, nil
	}
	// bitSize 63 keeps the parsed value representable as a non-negative int64.
	n, err := strconv.ParseUint(cl, 10, 63)
	if err != nil {
		return 0, badStringError("bad Content-Length", cl)
	}
	return int64(n), nil

}

// body turns a Reader into a ReadCloser.
// Close ensures that the body has been fully read
// and then reads the trailer if necessary.
type body struct {
	src          io.Reader
	closer       io.Closer
	hdr          any       // non-nil (Response or Request) value means read trailer
	r            io.Reader // underlying wire-format reader for the trailer
	closing      bool      // is the connection to be closed after reading body?
	doEarlyClose bool      // whether Close should stop early

	mu         sync.Mutex // guards following, and calls to Read and Close
	sawEOF     bool
	closed     bool
	earlyClose bool   // Close called and we didn't read to the end of src
	onHitEOF   func() // if non-nil, func to call when EOF is Read
}

// ErrBodyReadAfterClose is returned when reading a Request or Response
// Body after the body has been closed. This typically happens when the body is
// read after an HTTP Handler calls WriteHeader or Write on its
// ResponseWriter.
var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")

// Read acquires b.mu and delegates to readLocked.
func (b *body) Read(p []byte) (n int, err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return 0, ErrBodyReadAfterClose
	}
	return b.readLocked(p)
}

// Must hold b.mu.
func (b *body) readLocked(p []byte) (n int, err error) {
	if b.sawEOF {
		return 0, io.EOF
	}
	n, err = b.src.Read(p)

	if err == io.EOF {
		b.sawEOF = true
		// Chunked case. Read the trailer.
		if b.hdr != nil {
			b.hdr = nil
		} else {
			// If the server declared the Content-Length, our body is a LimitedReader
			// and we need to check whether this EOF arrived early.
			if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 {
				err = io.ErrUnexpectedEOF
			}
		}
	}

	// If we can return an EOF here along with the read data, do
	// so. This is optional per the io.Reader contract, but doing
	// so helps the HTTP transport code recycle its connection
	// earlier (since it will see this EOF itself), even if the
	// client doesn't do future reads or Close.
	if err == nil && n > 0 {
		if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
			err = io.EOF
			b.sawEOF = true
		}
	}

	if b.sawEOF && b.onHitEOF != nil {
		b.onHitEOF()
	}

	return n, err
}

// unreadDataSizeLocked returns the number of bytes of unread input.
// It returns -1 if unknown.
// b.mu must be held.
func (b *body) unreadDataSizeLocked() int64 {
	if lr, ok := b.src.(*io.LimitedReader); ok {
		return lr.N
	}
	return -1
}

// Close drains (or abandons) whatever remains of the body so the
// connection can be reused where possible, then closes the underlying
// closer. Idempotent.
func (b *body) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return nil
	}
	var err error
	switch {
	case b.sawEOF:
		// Already saw EOF, so no need going to look for it.
	case b.hdr == nil && b.closing:
		// no trailer and closing the connection next.
		// no point in reading to EOF.
	case b.doEarlyClose:
		// Read up to maxPostHandlerReadBytes bytes of the body, looking
		// for EOF (and trailers), so we can re-use this connection.
		if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
			// There was a declared Content-Length, and we have more bytes remaining
			// than our maxPostHandlerReadBytes tolerance. So, give up.
			b.earlyClose = true
		} else {
			var n int64
			// Consume the body, or, which will also lead to us reading
			// the trailer headers after the body, if present.
			n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
			if err == io.EOF {
				err = nil
			}
			if n == maxPostHandlerReadBytes {
				b.earlyClose = true
			}
		}
	default:
		// Fully consume the body, which will also lead to us reading
		// the trailer headers after the body, if present.
		_, err = io.Copy(io.Discard, bodyLocked{b})
	}
	b.closed = true

	// Close bodyChunk
	if b.closer != nil {
		closeErr := b.closer.Close()
		if err == nil {
			err = closeErr
		}
	}

	return err
}

// bodyLocked is an io.Reader reading from a *body when its mutex is
// already held.
+type bodyLocked struct { + b *body +} + +func (bl bodyLocked) Read(p []byte) (n int, err error) { + if bl.b.closed { + return 0, ErrBodyReadAfterClose + } + return bl.b.readLocked(p) +} + +func (b *body) didEarlyClose() bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.earlyClose +} + +// bodyRemains reports whether future Read calls might +// yield data. +func (b *body) bodyRemains() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.sawEOF +} + +func (b *body) registerOnHitEOF(fn func()) { + b.mu.Lock() + defer b.mu.Unlock() + b.onHitEOF = fn +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +func noResponseBodyExpected(requestMethod string) bool { + return requestMethod == "HEAD" +} + +func badStringError(what, val string) error { return fmt.Errorf("%s %q", what, val) } + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +// Determine whether to hang up after write a request and body, or +// receiving a response and body +// 'header' is the request headers. 
+func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool { + if major < 1 { + return true + } + + conv := header["Connection"] + hasClose := HeaderValuesContainsToken(conv, "close") + if major == 1 && minor == 0 { + return hasClose || !HeaderValuesContainsToken(conv, "keep-alive") + } + + if hasClose && removeCloseHeader { + header.Del("Connection") + } + + return hasClose +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. + return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. 
+func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// writeHeader Write Content-Length and/or Transfer-Encoding and/or Trailer header +func (r *Request) writeHeader(reqHeaders *hyper.Headers) error { + if r.Close && !hasToken(r.Header.get("Connection"), "close") { + if reqHeaders.Set(&[]byte("Connection")[0], c.Strlen(c.Str("Connection")), &[]byte("close")[0], c.Strlen(c.Str("close"))) != hyper.OK { + return fmt.Errorf("error setting header: Connection: %s\n", "close") + } + } + + // 'Content-Length' and 'Transfer-Encoding:chunked' are already handled by hyper + + // Write Trailer header + + return nil +} + +var nopCloserType = reflect.TypeOf(io.NopCloser(nil)) +var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct { + io.Reader + io.WriterTo +}{})) + +// unwrapNopCloser return the underlying reader and true if r is a NopCloser +// else it return false. +func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) { + switch reflect.TypeOf(r) { + case nopCloserType, nopCloserWriterToType: + return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true + default: + return nil, false + } +} + +// unwrapBody unwraps the body's inner reader if it's a +// nopCloser. This is to ensure that body writes sourced from local +// files (*os.File types) are properly optimized. +// +// This function is only intended for use in writeBody. 
+func (r *Request) unwrapBody() io.Reader { + if r, ok := unwrapNopCloser(r.Body); ok { + return r + } + if r, ok := r.Body.(*readTrackingBody); ok { + r.didRead = true + return r.ReadCloser + } + return r.Body +} + +func (r *Request) writeBody(hyperReq *hyper.Request, treq *transportRequest) error { + if r.Body != nil { + var body = r.unwrapBody() + hyperReqBody := hyper.NewBody() + buf := make([]byte, defaultChunkSize) + reqData := &bodyReq{ + body: body, + buf: buf, + treq: treq, + } + hyperReqBody.SetUserdata(c.Pointer(reqData), nil) + hyperReqBody.SetDataFunc(setPostData) + hyperReq.SetBody(hyperReqBody) + } + return nil +} + +type bodyReq struct { + body io.Reader + buf []byte + treq *transportRequest +} + +func setPostData(userdata c.Pointer, ctx *hyper.Context, chunk **hyper.Buf) c.Int { + req := (*bodyReq)(userdata) + n, err := req.body.Read(req.buf) + if err != nil { + if err == io.EOF { + *chunk = nil + req.treq.closeBody() + return hyper.PollReady + } + fmt.Println("error reading request body: ", err) + req.treq.setError(requestBodyReadError{err}) + return hyper.PollError + } + if n > 0 { + *chunk = hyper.CopyBuf(&req.buf[0], uintptr(n)) + return hyper.PollReady + } + if n == 0 { + *chunk = nil + req.treq.closeBody() + return hyper.PollReady + } + req.treq.closeBody() + err = fmt.Errorf("error reading request body: %s\n", c.GoString(c.Strerror(os.Errno))) + req.treq.setError(requestBodyReadError{err}) + return hyper.PollError +} diff --git a/x/net/http/transport.go b/x/net/http/transport.go new file mode 100644 index 0000000..e47bd2a --- /dev/null +++ b/x/net/http/transport.go @@ -0,0 +1,2495 @@ +package http + +import ( + "container/list" + "context" + "errors" + "fmt" + "hash/fnv" + "io" + "log" + "net/url" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/goplus/llgo/c" + "github.com/goplus/llgo/c/libuv" + cnet "github.com/goplus/llgo/c/net" + "github.com/goplus/llgo/c/syscall" + "github.com/goplus/llgoexamples/rust/hyper" + 
"github.com/goplus/llgoexamples/x/net" +) + +// DefaultTransport is the default implementation of Transport and is +// used by DefaultClient. It establishes network connections as needed +// and caches them for reuse by subsequent calls. It uses HTTP proxies +// as directed by the environment variables HTTP_PROXY, HTTPS_PROXY +// and NO_PROXY (or the lowercase versions thereof). +var DefaultTransport RoundTripper = &Transport{ + Proxy: nil, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, +} + +// DefaultMaxIdleConnsPerHost is the default value of Transport's +// MaxIdleConnsPerHost. +const DefaultMaxIdleConnsPerHost = 2 +const _SC_NPROCESSORS_ONLN c.Int = 58 + +// Debug switch provided for developers +const ( + debugSwitch = true + debugReadWriteLoop = true +) + +type Transport struct { + idleMu sync.Mutex + closeIdle bool // user has requested to close all idle conns + idleConn map[connectMethodKey][]*persistConn // most recently used at end + idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns + idleLRU connLRU + + reqMu sync.Mutex + reqCanceler map[cancelKey]func(error) + + altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme + + connsPerHostMu sync.Mutex + connsPerHost map[connectMethodKey]int + connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns + + Proxy func(*Request) (*url.URL, error) + + DisableKeepAlives bool + DisableCompression bool + + MaxIdleConns int + MaxIdleConnsPerHost int + MaxConnsPerHost int + IdleConnTimeout time.Duration + + loopsMu sync.Mutex + loops []*clientEventLoop + isClosing atomic.Bool + //curLoop atomic.Uint32 +} + +// A cancelKey is the key of the reqCanceler map. +// We wrap the *Request in this type since we want to use the original request, +// not any transient one created by roundTrip. +type cancelKey struct { + req *Request +} + +// incomparable is a zero-width, non-comparable type. 
Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() + +// responseAndError is how the goroutine reading from an HTTP/1 server +// communicates with the goroutine doing the RoundTrip. +type responseAndError struct { + _ incomparable + res *Response // else use this response (see res method) + err error +} + +type timeoutData struct { + timeoutch chan struct{} + taskData *taskData +} + +type readTrackingBody struct { + io.ReadCloser + didRead bool + didClose bool +} + +func (r *readTrackingBody) Read(data []byte) (int, error) { + r.didRead = true + return r.ReadCloser.Read(data) +} + +func (r *readTrackingBody) Close() error { + r.didClose = true + return r.ReadCloser.Close() +} + +// setupRewindBody returns a new request with a custom body wrapper +// that can report whether the body needs rewinding. +// This lets rewindBody avoid an error result when the request +// does not have GetBody but the body hasn't been read at all yet. +func setupRewindBody(req *Request) *Request { + if req.Body == nil || req.Body == NoBody { + return req + } + newReq := *req + newReq.Body = &readTrackingBody{ReadCloser: req.Body} + return &newReq +} + +// rewindBody returns a new request with the body rewound. +// It returns req unmodified if the body does not need rewinding. +// rewindBody takes care of closing req.Body when appropriate +// (in all cases except when rewindBody returns req unmodified). 
func rewindBody(req *Request) (rewound *Request, err error) {
	// Nothing to do if there is no body, or if the tracking wrapper shows
	// the body was never touched (neither read nor closed).
	if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) {
		return req, nil // nothing to rewind
	}
	// The body was read but not yet closed; close it before replacing it.
	if !req.Body.(*readTrackingBody).didClose {
		req.closeBody()
	}
	// Without GetBody there is no way to obtain a fresh copy of the body.
	if req.GetBody == nil {
		return nil, errCannotRewind
	}
	body, err := req.GetBody()
	if err != nil {
		return nil, err
	}
	// Shallow-copy the request and install the fresh, tracked body.
	newReq := *req
	newReq.Body = &readTrackingBody{ReadCloser: body}
	return &newReq, nil
}

// transportRequest is a wrapper around a *Request that adds
// optional extra headers to write and stores any error to return
// from roundTrip.
type transportRequest struct {
	*Request // original request, not to be mutated
	extra    Header // extra headers to write, or nil
	//trace *httptrace.ClientTrace // optional
	cancelKey cancelKey

	mu  sync.Mutex // guards err
	err error      // first setError value for mapRoundTripError to consider
}

// extraHeaders lazily allocates and returns the extra-headers map.
func (tr *transportRequest) extraHeaders() Header {
	if tr.extra == nil {
		tr.extra = make(Header)
	}
	return tr.extra
}

// setError records err as the request's error, keeping only the first
// error ever reported (later calls are no-ops).
func (tr *transportRequest) setError(err error) {
	tr.mu.Lock()
	if tr.err == nil {
		tr.err = err
	}
	tr.mu.Unlock()
}

// putOrCloseIdleConn returns pconn to the idle pool, or closes it when the
// pool refuses it (keep-alives disabled, broken conn, pool full, ...).
func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
	if err := t.tryPutIdleConn(pconn); err != nil {
		if debugSwitch {
			println("############### putOrCloseIdleConn: close")
		}
		pconn.close(err)
	}
}

// maxIdleConnsPerHost returns the configured per-host idle-connection
// limit, falling back to DefaultMaxIdleConnsPerHost when unset (zero).
func (t *Transport) maxIdleConnsPerHost() int {
	if v := t.MaxIdleConnsPerHost; v != 0 {
		return v
	}
	return DefaultMaxIdleConnsPerHost
}

// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
// a new request.
// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
// an error explaining why it wasn't registered.
// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
	// A negative MaxIdleConnsPerHost (like keep-alives disabled) means the
	// caller wants no idle pooling at all.
	if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
		return errKeepAlivesDisabled
	}
	if pconn.isBroken() {
		return errConnBroken
	}
	pconn.markReused()

	t.idleMu.Lock()
	defer t.idleMu.Unlock()

	// HTTP/2 (pconn.alt != nil) connections do not come out of the idle list,
	// because multiple goroutines can use them simultaneously.
	// If this is an HTTP/2 connection being “returned,” we're done.
	if pconn.alt != nil && t.idleLRU.m[pconn] != nil {
		return nil
	}

	// Deliver pconn to goroutine waiting for idle connection, if any.
	// (They may be actively dialing, but this conn is ready first.
	// Chrome calls this socket late binding.
	// See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
	key := pconn.cacheKey
	if q, ok := t.idleConnWait[key]; ok {
		done := false
		if pconn.alt == nil {
			// HTTP/1.
			// Loop over the waiting list until we find a w that isn't done already, and hand it pconn.
			for q.len() > 0 {
				w := q.popFront()
				if w.tryDeliver(pconn, nil) {
					done = true
					break
				}
			}
		} else {
			// HTTP/2.
			// Can hand the same pconn to everyone in the waiting list,
			// and we still won't be done: we want to put it in the idle
			// list unconditionally, for any future clients too.
			for q.len() > 0 {
				w := q.popFront()
				w.tryDeliver(pconn, nil)
			}
		}
		// q is a value type: store the mutated queue back (or drop the key
		// entirely when the queue is drained).
		if q.len() == 0 {
			delete(t.idleConnWait, key)
		} else {
			t.idleConnWait[key] = q
		}
		if done {
			return nil
		}
	}

	if t.closeIdle {
		return errCloseIdle
	}
	if t.idleConn == nil {
		t.idleConn = make(map[connectMethodKey][]*persistConn)
	}
	idles := t.idleConn[key]
	if len(idles) >= t.maxIdleConnsPerHost() {
		return errTooManyIdleHost
	}
	// Double-adding the same conn to the freelist is a programmer error;
	// fail loudly rather than corrupt the pool.
	for _, exist := range idles {
		if exist == pconn {
			log.Fatalf("dup idle pconn %p in freelist", pconn)
		}
	}
	t.idleConn[key] = append(idles, pconn)
	t.idleLRU.add(pconn)
	// Evict the least-recently-used idle conn when the global cap is exceeded.
	if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
		oldest := t.idleLRU.removeOldest()
		if debugSwitch {
			println("############### tryPutIdleConn: removeOldest")
		}
		oldest.close(errTooManyIdle)
		t.removeIdleConnLocked(oldest)
	}

	// Set idle timer, but only for HTTP/1 (pconn.alt == nil).
	// The HTTP/2 implementation manages the idle timer itself
	// (see idleConnTimeout in h2_bundle.go).
	idleConnTimeout := uint64(t.IdleConnTimeout.Milliseconds())
	if t.IdleConnTimeout > 0 && pconn.alt == nil {
		if pconn.idleTimer != nil {
			// Timer already exists from an earlier pooling: re-arm it.
			pconn.idleTimer.Start(onIdleConnTimeout, idleConnTimeout, 0)
		} else {
			// First time: create a libuv timer on this conn's event loop and
			// stash pconn in the handle's user data for the callback.
			pconn.idleTimer = &libuv.Timer{}
			libuv.InitTimer(pconn.eventLoop.loop, pconn.idleTimer)
			(*libuv.Handle)(c.Pointer(pconn.idleTimer)).SetData(c.Pointer(pconn))
			pconn.idleTimer.Start(onIdleConnTimeout, idleConnTimeout, 0)
		}
	}
	pconn.idleAt = time.Now()
	return nil
}

// onIdleConnTimeout is the libuv timer callback fired when a pooled HTTP/1
// connection has been idle for Transport.IdleConnTimeout. It closes the
// connection if it is still idle, otherwise re-arms the timer.
func onIdleConnTimeout(timer *libuv.Timer) {
	pconn := (*persistConn)((*libuv.Handle)(c.Pointer(timer)).GetData())
	isClose := pconn.closeConnIfStillIdle()
	if isClose {
		timer.Stop()
	} else {
		// NOTE(review): re-arming with a 0 timeout makes this callback fire
		// again on the next loop iteration, effectively busy-polling until the
		// conn goes idle — confirm whether IdleConnTimeout was intended here.
		timer.Start(onIdleConnTimeout, 0, 0)
	}
}

// queueForIdleConn queues w to receive the next idle connection for w.cm.
// As an optimization hint to the caller, queueForIdleConn reports whether
// it successfully delivered an already-idle connection.
func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
	if t.DisableKeepAlives {
		return false
	}

	t.idleMu.Lock()
	defer t.idleMu.Unlock()

	// Stop closing connections that become idle - we might want one.
	// (That is, undo the effect of t.CloseIdleConnections.)
	t.closeIdle = false

	if w == nil {
		// Happens in test hook.
		return false
	}

	// If IdleConnTimeout is set, calculate the oldest
	// persistConn.idleAt time we're willing to use a cached idle
	// conn.
	var oldTime time.Time
	if t.IdleConnTimeout > 0 {
		oldTime = time.Now().Add(-t.IdleConnTimeout)
	}
	// Look for most recently-used idle connection.
	if list, ok := t.idleConn[w.key]; ok {
		stop := false
		// Intentionally shadows the named result: the value returned via
		// "return delivered" below is this inner variable.
		delivered := false
		for len(list) > 0 && !stop {
			pconn := list[len(list)-1]

			// See whether this connection has been idle too long, considering
			// only the wall time (the Round(0)), in case this is a laptop or VM
			// coming out of suspend with previously cached idle connections.
			// FIXME: Round() is not supported in llgo
			//tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)
			tooOld := !oldTime.IsZero() && pconn.idleAt.Before(oldTime)
			if tooOld {
				// Async cleanup. Launch in its own goroutine (as if a
				// time.AfterFunc called it); it acquires idleMu, which we're
				// holding, and does a synchronous net.Conn.Close.
				pconn.closeConnIfStillIdleLocked()
			}
			if pconn.isBroken() || tooOld {
				// If either persistConn.readLoop has marked the connection
				// broken, but Transport.removeIdleConn has not yet removed it
				// from the idle list, or if this persistConn is too old (it was
				// idle too long), then ignore it and look for another. In both
				// cases it's already in the process of being closed.
				list = list[:len(list)-1]
				continue
			}
			delivered = w.tryDeliver(pconn, nil)
			if delivered {
				if pconn.alt != nil {
					// HTTP/2: multiple clients can share pconn.
					// Leave it in the list.
				} else {
					// HTTP/1: only one client can use pconn.
					// Remove it from the list.
					t.idleLRU.remove(pconn)
					list = list[:len(list)-1]
				}
			}
			stop = true
		}
		// Persist the (possibly shortened) list, or drop the key if empty.
		if len(list) > 0 {
			t.idleConn[w.key] = list
		} else {
			delete(t.idleConn, w.key)
		}
		if stop {
			return delivered
		}
	}

	// Register to receive next connection that becomes idle.
	if t.idleConnWait == nil {
		t.idleConnWait = make(map[connectMethodKey]wantConnQueue)
	}
	q := t.idleConnWait[w.key]
	q.cleanFront()
	q.pushBack(w)
	t.idleConnWait[w.key] = q
	return false
}

// removeIdleConn marks pconn as dead.
func (t *Transport) removeIdleConn(pconn *persistConn) bool {
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	return t.removeIdleConnLocked(pconn)
}

// removeIdleConnLocked removes pconn from the idle pool, stopping and
// closing its libuv idle timer if one is armed.
// t.idleMu must be held.
func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool {
	// Tear down the idle timer first so it cannot fire for a removed conn.
	if pconn.idleTimer != nil && (*libuv.Handle)(c.Pointer(pconn.idleTimer)).IsClosing() == 0 {
		pconn.idleTimer.Stop()
		(*libuv.Handle)(c.Pointer(pconn.idleTimer)).Close(nil)
		pconn.idleTimer = nil
	}
	t.idleLRU.remove(pconn)
	key := pconn.cacheKey
	pconns := t.idleConn[key]
	var removed bool
	switch len(pconns) {
	case 0:
		// Nothing
	case 1:
		if pconns[0] == pconn {
			delete(t.idleConn, key)
			removed = true
		}
	default:
		for i, v := range pconns {
			if v != pconn {
				continue
			}
			// Slide down, keeping most recently-used
			// conns at the end.
			copy(pconns[i:], pconns[i+1:])
			t.idleConn[key] = pconns[:len(pconns)-1]
			removed = true
			break
		}
	}
	return removed
}

// setReqCanceler installs (fn != nil) or removes (fn == nil) the cancel
// function associated with key.
func (t *Transport) setReqCanceler(key cancelKey, fn func(error)) {
	t.reqMu.Lock()
	defer t.reqMu.Unlock()
	if t.reqCanceler == nil {
		t.reqCanceler = make(map[cancelKey]func(error))
	}
	if fn != nil {
		t.reqCanceler[key] = fn
	} else {
		delete(t.reqCanceler, key)
	}
}

// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
// for the request, we don't set the function and return false.
+// Since CancelRequest will clear the canceler, we can use the return value to detect if +// the request was canceled since the last setReqCancel call. +func (t *Transport) replaceReqCanceler(key cancelKey, fn func(error)) bool { + t.reqMu.Lock() + defer t.reqMu.Unlock() + _, ok := t.reqCanceler[key] + if !ok { + return false + } + if fn != nil { + t.reqCanceler[key] = fn + } else { + delete(t.reqCanceler, key) + } + return true +} + +func (t *Transport) connectMethodForRequest(treq *transportRequest, loop *clientEventLoop) (cm connectMethod, err error) { + cm.targetScheme = treq.URL.Scheme + cm.targetAddr = canonicalAddr(treq.URL) + if t.Proxy != nil { + cm.proxyURL, err = t.Proxy(treq.Request) + } + cm.onlyH1 = treq.requiresHTTP1() + cm.eventLoop = loop + return cm, err +} + +// alternateRoundTripper returns the alternate RoundTripper to use +// for this request if the Request's URL scheme requires one, +// or nil for the normal case of using the Transport. +func (t *Transport) alternateRoundTripper(req *Request) RoundTripper { + if !t.useRegisteredProtocol(req) { + return nil + } + altProto, _ := t.altProto.Load().(map[string]RoundTripper) + return altProto[req.URL.Scheme] +} + +// useRegisteredProtocol reports whether an alternate protocol (as registered +// with Transport.RegisterProtocol) should be respected for this request. +func (t *Transport) useRegisteredProtocol(req *Request) bool { + // If this request requires HTTP/1, don't use the + // "https" alternate protocol, which is used by the + // HTTP/2 code to take over requests if there's an + // existing cached HTTP/2 connection. + return !(req.URL.Scheme == "https" && req.requiresHTTP1()) +} + +// CancelRequest cancels an in-flight request by closing its connection. +// CancelRequest should only be called after RoundTrip has returned. +// +// Deprecated: Use Request.WithContext to create a request with a +// cancelable context instead. CancelRequest cannot cancel HTTP/2 +// requests. 
+func (t *Transport) CancelRequest(req *Request) { + t.cancelRequest(cancelKey{req}, errRequestCanceled) +} + +// Cancel an in-flight request, recording the error value. +// Returns whether the request was canceled. +func (t *Transport) cancelRequest(key cancelKey, err error) bool { + // This function must not return until the cancel func has completed. + // See: https://golang.org/issue/34658 + t.reqMu.Lock() + defer t.reqMu.Unlock() + cancel := t.reqCanceler[key] + delete(t.reqCanceler, key) + if cancel != nil { + cancel(err) + } + + return cancel != nil +} + +func (t *Transport) Close() { + if t != nil && !t.isClosing.Swap(true) { + t.CloseIdleConnections() + for _, el := range t.loops { + el.Close() + } + } +} + +type clientEventLoop struct { + // libuv and hyper related + loop *libuv.Loop + async *libuv.Async + exec *hyper.Executor + isRunning atomic.Bool + isClosing atomic.Bool +} + +func (el *clientEventLoop) Close() { + if el != nil && !el.isClosing.Swap(true) { + if el.loop != nil && (*libuv.Handle)(c.Pointer(el.loop)).IsClosing() == 0 { + el.loop.Close() + el.loop = nil + } + if el.async != nil && (*libuv.Handle)(c.Pointer(el.async)).IsClosing() == 0 { + el.async.Close(nil) + el.async = nil + } + if el.exec != nil { + el.exec.Free() + el.exec = nil + } + } +} + +func (el *clientEventLoop) run() { + if el.isRunning.Load() { + return + } + + el.loop.Async(el.async, nil) + + checker := &libuv.Idle{} + libuv.InitIdle(el.loop, checker) + (*libuv.Handle)(c.Pointer(checker)).SetData(c.Pointer(el)) + checker.Start(readWriteLoop) + + go el.loop.Run(libuv.RUN_DEFAULT) + + el.isRunning.Store(true) +} + +// ---------------------------------------------------------- + +func getMilliseconds(deadline time.Time) uint64 { + microseconds := deadline.Sub(time.Now()).Microseconds() + milliseconds := microseconds / 1e3 + if microseconds%1e3 != 0 { + milliseconds += 1 + } + return uint64(milliseconds) +} + +var cpuCount int + +func init() { + cpuCount = 
int(c.Sysconf(_SC_NPROCESSORS_ONLN)) + if cpuCount <= 0 { + cpuCount = 4 + } +} + +func (t *Transport) getOrInitClientEventLoop(i uint32) *clientEventLoop { + if el := t.loops[i]; el != nil { + return el + } + + eventLoop := &clientEventLoop{ + loop: libuv.LoopNew(), + async: &libuv.Async{}, + exec: hyper.NewExecutor(), + } + + eventLoop.run() + + t.loops[i] = eventLoop + return eventLoop +} + +func (t *Transport) getClientEventLoop(req *Request) *clientEventLoop { + t.loopsMu.Lock() + defer t.loopsMu.Unlock() + if t.loops == nil { + t.loops = make([]*clientEventLoop, cpuCount) + } + + key := t.getLoopKey(req) + h := fnv.New32a() + h.Write([]byte(key)) + hashcode := h.Sum32() + + return t.getOrInitClientEventLoop(hashcode % uint32(cpuCount)) + //i := (t.curLoop.Add(1) - 1) % uint32(cpuCount) + //return t.getOrInitClientEventLoop(i) +} + +func (t *Transport) getLoopKey(req *Request) string { + proxyStr := "" + if t.Proxy != nil { + proxyURL, _ := t.Proxy(req) + proxyStr = proxyURL.String() + } + return req.URL.String() + proxyStr +} + +func (t *Transport) RoundTrip(req *Request) (*Response, error) { + if debugSwitch { + println("############### RoundTrip start") + defer println("############### RoundTrip end") + } + + eventLoop := t.getClientEventLoop(req) + + // If timeout is set, start the timer + var didTimeout func() bool + var stopTimer func() + // Only the first request will initialize the timer + if req.timer == nil && !req.deadline.IsZero() { + req.timer = &libuv.Timer{} + libuv.InitTimer(eventLoop.loop, req.timer) + ch := &timeoutData{ + timeoutch: req.timeoutch, + taskData: nil, + } + (*libuv.Handle)(c.Pointer(req.timer)).SetData(c.Pointer(ch)) + + req.timer.Start(onTimeout, getMilliseconds(req.deadline), 0) + if debugSwitch { + println("############### timer start") + } + didTimeout = func() bool { return req.timer.GetDueIn() == 0 } + stopTimer = func() { + close(req.timeoutch) + req.timer.Stop() + if (*libuv.Handle)(c.Pointer(req.timer)).IsClosing() == 0 
{ + (*libuv.Handle)(c.Pointer(req.timer)).Close(nil) + } + if debugSwitch { + println("############### timer close") + } + } + } else { + didTimeout = alwaysFalse + stopTimer = nop + } + + resp, err := t.doRoundTrip(req, eventLoop) + if err != nil { + stopTimer() + return nil, err + } + + if !req.deadline.IsZero() { + resp.Body = &cancelTimerBody{ + stop: stopTimer, + rc: resp.Body, + reqDidTimeout: didTimeout, + } + } + return resp, nil +} + +func (t *Transport) doRoundTrip(req *Request, loop *clientEventLoop) (*Response, error) { + if debugSwitch { + println("############### doRoundTrip start") + defer println("############### doRoundTrip end") + } + //t.nextProtoOnce.Do(t.onceSetNextProtoDefaults) + //ctx := req.Context() + //trace := httptrace.ContextClientTrace(ctx) + + if req.URL == nil { + req.closeBody() + return nil, errors.New("http: nil Request.URL") + } + if req.Header == nil { + req.closeBody() + return nil, errors.New("http: nil Request.Header") + } + scheme := req.URL.Scheme + isHTTP := scheme == "http" || scheme == "https" + if isHTTP { + for k, vv := range req.Header { + if !ValidHeaderFieldName(k) { + req.closeBody() + return nil, fmt.Errorf("net/http: invalid header field name %q", k) + } + for _, v := range vv { + if !ValidHeaderFieldValue(v) { + req.closeBody() + // Don't include the value in the error, because it may be sensitive. 
+ return nil, fmt.Errorf("net/http: invalid header field value for %q", k) + } + } + } + } + + origReq := req + cancelKey := cancelKey{origReq} + req = setupRewindBody(req) + + if altRT := t.alternateRoundTripper(req); altRT != nil { + if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol { + return resp, err + } + var err error + req, err = rewindBody(req) + if err != nil { + return nil, err + } + } + if !isHTTP { + req.closeBody() + return nil, badStringError("unsupported protocol scheme", scheme) + } + if req.Method != "" && !validMethod(req.Method) { + req.closeBody() + return nil, fmt.Errorf("net/http: invalid method %q", req.Method) + } + if req.URL.Host == "" { + req.closeBody() + return nil, errors.New("http: no Host in request URL") + } + + for { + select { + case <-req.timeoutch: + req.closeBody() + return nil, errors.New("request timeout!") + default: + } + + // treq gets modified by roundTrip, so we need to recreate for each retry. + //treq := &transportRequest{Request: req, trace: trace, cancelKey: cancelKey} + treq := &transportRequest{Request: req, cancelKey: cancelKey} + cm, err := t.connectMethodForRequest(treq, loop) + if err != nil { + req.closeBody() + return nil, err + } + + // Get the cached or newly-created connection to either the + // host (for http or https), the http proxy, or the http proxy + // pre-CONNECTed to https server. In any case, we'll be ready + // to send it requests. + pconn, err := t.getConn(treq, cm) + + if err != nil { + println("################# getConn err != nil") + t.setReqCanceler(cancelKey, nil) + req.closeBody() + return nil, err + } + + var resp *Response + if pconn.alt != nil { + // HTTP/2 path. + t.setReqCanceler(cancelKey, nil) // HTTP/2 not cancelable with CancelRequest + resp, err = pconn.alt.RoundTrip(req) + } else { + // HTTP/1.X path. + resp, err = pconn.roundTrip(treq) + } + + if err == nil { + resp.Request = origReq + return resp, nil + } + + // Failed. Clean up and determine whether to retry. 
+ if http2isNoCachedConnError(err) { + if t.removeIdleConn(pconn) { + t.decConnsPerHost(pconn.cacheKey) + } + } else if !pconn.shouldRetryRequest(req, err) { + // Issue 16465: return underlying net.Conn.Read error from peek, + // as we've historically done. + if e, ok := err.(nothingWrittenError); ok { + err = e.error + } + if e, ok := err.(transportReadFromServerError); ok { + err = e.err + } + if b, ok := req.Body.(*readTrackingBody); ok && !b.didClose { + // Issue 49621: Close the request body if pconn.roundTrip + // didn't do so already. This can happen if the pconn + // write loop exits without reading the write request. + req.closeBody() + } + return nil, err + } + testHookRoundTripRetried() + + // Rewind the body if we're able to. + req, err = rewindBody(req) + if err != nil { + return nil, err + } + } +} + +func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) { + if debugSwitch { + println("############### getConn start") + defer println("############### getConn end") + } + req := treq.Request + //trace := treq.trace + //ctx := req.Context() + //if trace != nil && trace.GetConn != nil { + // trace.GetConn(cm.addr()) + //} + + w := &wantConn{ + cm: cm, + key: cm.key(), + //ctx: ctx, + timeoutch: treq.timeoutch, + beforeDial: testHookPrePendingDial, + afterDial: testHookPostPendingDial, + } + defer func() { + if err != nil { + w.cancel(t, err) + } + }() + + // Queue for idle connection. + if delivered := t.queueForIdleConn(w); delivered { + pc := w.pc + // Trace only for HTTP/1. + // HTTP/2 calls trace.GotConn itself. 
+ //if pc.alt == nil && trace != nil && trace.GotConn != nil { + // trace.GotConn(pc.gotIdleConnTrace(pc.idleAt)) + //} + // set request canceler to some non-nil function so we + // can detect whether it was cleared between now and when + // we enter roundTrip + t.setReqCanceler(treq.cancelKey, func(error) {}) + return pc, nil + } + + cancelc := make(chan error, 1) + t.setReqCanceler(treq.cancelKey, func(err error) { cancelc <- err }) + + // Queue for permission to dial. + t.queueForDial(w) + + // Trace success but only for HTTP/1. + // HTTP/2 calls trace.GotConn itself. + //if w.pc != nil && w.pc.alt == nil && trace != nil && trace.GotConn != nil { + // trace.GotConn(httptrace.GotConnInfo{Conn: w.pc.conn, Reused: w.pc.isReused()}) + //} + if w.err != nil { + return nil, w.err + } + // If the request has been canceled, that's probably + // what caused w.err; if so, prefer to return the + // cancellation error (see golang.org/issue/16049). + select { + case <-req.timeoutch: + if debugSwitch { + println("############### getConn: timeoutch") + } + return nil, errors.New("timeout: req.Context().Err()") + case err := <-cancelc: + if err == errRequestCanceled { + err = errRequestCanceledConn + } + return nil, err + default: + // return below + } + return w.pc, w.err +} + +// queueForDial queues w to wait for permission to begin dialing. +// Once w receives permission to dial, it will do so in a separate goroutine. 
func (t *Transport) queueForDial(w *wantConn) {
	if debugSwitch {
		println("############### queueForDial start")
		defer println("############### queueForDial end")
	}
	w.beforeDial()

	// No per-host connection cap: dial immediately.
	if t.MaxConnsPerHost <= 0 {
		t.dialConnFor(w)
		return
	}

	t.connsPerHostMu.Lock()
	defer t.connsPerHostMu.Unlock()

	// Reading a nil map yields the zero count, so this is safe before the
	// make below; the map is only allocated once we need to write to it.
	if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost {
		if t.connsPerHost == nil {
			t.connsPerHost = make(map[connectMethodKey]int)
		}
		t.connsPerHost[w.key] = n + 1
		t.dialConnFor(w)
		return
	}

	// At the cap: park w on the per-host wait queue; decConnsPerHost will
	// dial for it when a slot frees up.
	if t.connsPerHostWait == nil {
		t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue)
	}
	q := t.connsPerHostWait[w.key]
	q.cleanFront()
	q.pushBack(w)
	t.connsPerHostWait[w.key] = q
}

// dialConnFor dials on behalf of w and delivers the result to w.
// dialConnFor has received permission to dial w.cm and is counted in t.connCount[w.cm.key()].
// If the dial is canceled or unsuccessful, dialConnFor decrements t.connCount[w.cm.key()].
func (t *Transport) dialConnFor(w *wantConn) {
	if debugSwitch {
		println("############### dialConnFor start")
		defer println("############### dialConnFor end")
	}
	defer w.afterDial()

	pc, err := t.dialConn(w.timeoutch, w.cm)
	delivered := w.tryDeliver(pc, err)
	// If the connection was successfully established but was not passed to w,
	// or is a shareable HTTP/2 connection
	if err == nil && (!delivered || pc.alt != nil) {
		// pconn was not passed to w,
		// or it is HTTP/2 and can be shared.
		// Add to the idle connection pool.
		t.putOrCloseIdleConn(pc)
	}
	// If an error occurs during the dialing process, the connection count for that host is decreased.
	// This ensures that the connection count remains accurate even in cases where the dial attempt fails.
	if err != nil {
		t.decConnsPerHost(w.key)
	}
}

// decConnsPerHost decrements the per-host connection count for key,
// which may in turn give a different waiting goroutine permission to dial.
+func (t *Transport) decConnsPerHost(key connectMethodKey) { + if t.MaxConnsPerHost <= 0 { + return + } + + t.connsPerHostMu.Lock() + defer t.connsPerHostMu.Unlock() + n := t.connsPerHost[key] + if n == 0 { + // Shouldn't happen, but if it does, the counting is buggy and could + // easily lead to a silent deadlock, so report the problem loudly. + panic("net/http: internal error: connCount underflow") + } + + // Can we hand this count to a goroutine still waiting to dial? + // (Some goroutines on the wait list may have timed out or + // gotten a connection another way. If they're all gone, + // we don't want to kick off any spurious dial operations.) + if q := t.connsPerHostWait[key]; q.len() > 0 { + done := false + for q.len() > 0 { + w := q.popFront() + if w.waiting() { + t.dialConnFor(w) + done = true + break + } + } + if q.len() == 0 { + delete(t.connsPerHostWait, key) + } else { + // q is a value (like a slice), so we have to store + // the updated q back into the map. + t.connsPerHostWait[key] = q + } + if done { + return + } + } + + // Otherwise, decrement the recorded count. 
+ if n--; n == 0 { + delete(t.connsPerHost, key) + } else { + t.connsPerHost[key] = n + } +} + +func (t *Transport) dialConn(timeoutch chan struct{}, cm connectMethod) (pconn *persistConn, err error) { + if debugSwitch { + println("############### dialConn start") + defer println("############### dialConn end") + } + select { + case <-timeoutch: + err = errors.New("[t.dialConn] request timeout") + return + default: + } + pconn = &persistConn{ + t: t, + cacheKey: cm.key(), + closech: make(chan struct{}, 1), + writeLoopDone: make(chan struct{}, 1), + alive: true, + chunkAsync: &libuv.Async{}, + eventLoop: cm.eventLoop, + } + cm.eventLoop.loop.Async(pconn.chunkAsync, readyToRead) + + conn, err := t.dial(cm) + if err != nil { + return nil, err + } + pconn.conn = conn + + select { + case <-timeoutch: + conn.Close() + return + default: + } + // TODO(hah) Proxy(https/sock5)(t.dialConn) + // Proxy setup. + switch { + case cm.proxyURL == nil: + // Do nothing. Not using a proxy. + // case cm.proxyURL.Scheme == "socks5": + case cm.targetScheme == "http": + pconn.isProxy = true + if pa := cm.proxyAuth(); pa != "" { + pconn.mutateHeaderFunc = func(h Header) { + h.Set("Proxy-Authorization", pa) + } + } + // case cm.targetScheme == "https": + } + + pconn.closeErr = errReadLoopExiting + + select { + case <-timeoutch: + err = errors.New("[t.dialConn] request timeout") + if debugSwitch { + println("############### dialConn: timeoutch") + } + pconn.close(err) + return nil, err + default: + } + return pconn, nil +} + +func (t *Transport) dial(cm connectMethod) (*connData, error) { + if debugSwitch { + println("############### dial start") + defer println("############### dial end") + } + addr := cm.addr() + host, port, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + conn := new(connData) + + libuv.InitTcp(cm.eventLoop.loop, &conn.tcpHandle) + (*libuv.Handle)(c.Pointer(&conn.tcpHandle)).SetData(c.Pointer(conn)) + + var hints cnet.AddrInfo + 
c.Memset(c.Pointer(&hints), 0, unsafe.Sizeof(hints)) + hints.Family = syscall.AF_UNSPEC + hints.SockType = syscall.SOCK_STREAM + + var res *cnet.AddrInfo + status := cnet.Getaddrinfo(c.AllocaCStr(host), c.AllocaCStr(port), &hints, &res) + if status != 0 { + return nil, fmt.Errorf("getaddrinfo error\n") + } + + (*libuv.Req)(c.Pointer(&conn.connectReq)).SetData(c.Pointer(conn)) + status = libuv.TcpConnect(&conn.connectReq, &conn.tcpHandle, res.Addr, onConnect) + if status != 0 { + return nil, fmt.Errorf("connect error: %s\n", c.GoString(libuv.Strerror(libuv.Errno(status)))) + } + + cnet.Freeaddrinfo(res) + return conn, nil +} + +func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) { + if debugSwitch { + println("############### roundTrip start") + defer println("############### roundTrip end") + } + testHookEnterRoundTrip() + if !pc.t.replaceReqCanceler(req.cancelKey, pc.cancelRequest) { + pc.t.putOrCloseIdleConn(pc) + return nil, errRequestCanceled + } + pc.mu.Lock() + pc.numExpectedResponses++ + headerFn := pc.mutateHeaderFunc + pc.mu.Unlock() + + if headerFn != nil { + headerFn(req.extraHeaders()) + } + + // Set extra headers, such as Accept-Encoding, Connection(Keep-Alive). + requestedGzip := pc.setExtraHeaders(req) + + gone := make(chan struct{}, 1) + defer close(gone) + + defer func() { + if err != nil { + pc.t.setReqCanceler(req.cancelKey, nil) + } + }() + + // Write the request concurrently with waiting for a response, + // in case the server decides to reply before reading our full + // request body. 
+ startBytesWritten := pc.conn.nwrite + writeErrCh := make(chan error, 1) + resc := make(chan responseAndError, 1) + + taskData := &taskData{ + req: req, + pc: pc, + addedGzip: requestedGzip, + writeErrCh: writeErrCh, + callerGone: gone, + resc: resc, + } + + //if pc.client == nil && !pc.isReused() { + // Hookup the IO + hyperIo := newHyperIo(pc.conn) + // We need an executor generally to poll futures + // Prepare client options + opts := hyper.NewClientConnOptions() + opts.Exec(pc.eventLoop.exec) + // send the handshake + handshakeTask := hyper.Handshake(hyperIo, opts) + taskData.taskId = handshake + handshakeTask.SetUserdata(c.Pointer(taskData), nil) + // Send the request to readWriteLoop(). + pc.eventLoop.exec.Push(handshakeTask) + //} else { + // println("############### roundTrip: pc.client != nil") + // taskData.taskId = read + // err = req.write(pc.client, taskData, pc.eventLoop.exec) + // if err != nil { + // writeErrCh <- err + // pc.close(err) + // } + //} + + // Wake up libuv. 
Loop + pc.eventLoop.async.Send() + + timeoutch := req.timeoutch + pcClosed := pc.closech + canceled := false + + for { + testHookWaitResLoop() + if debugSwitch { + println("############### roundTrip for") + } + select { + case err := <-writeErrCh: + if debugSwitch { + println("############### roundTrip: writeErrch") + } + if err != nil { + if debugSwitch { + println("############### roundTrip: writeErrch err != nil") + } + pc.close(fmt.Errorf("write error: %w", err)) + if pc.conn.nwrite == startBytesWritten { + err = nothingWrittenError{err} + } + return nil, pc.mapRoundTripError(req, startBytesWritten, err) + } + case <-pcClosed: + if debugSwitch { + println("############### roundTrip: pcClosed") + } + pcClosed = nil + if canceled || pc.t.replaceReqCanceler(req.cancelKey, nil) { + return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed) + } + //case <-respHeaderTimer: + case re := <-resc: + if debugSwitch { + println("############### roundTrip: resc") + } + if (re.res == nil) == (re.err == nil) { + return nil, fmt.Errorf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil) + } + if re.err != nil { + return nil, pc.mapRoundTripError(req, startBytesWritten, re.err) + } + return re.res, nil + case <-timeoutch: + if debugSwitch { + println("############### roundTrip: timeoutch") + } + canceled = pc.t.cancelRequest(req.cancelKey, errors.New("timeout: req.Context().Err()")) + timeoutch = nil + return nil, errors.New("request timeout") + } + } +} + +// readWriteLoop handles the main I/O loop for a persistent connection. +// It processes incoming requests, sends them to the server, and handles responses. +func readWriteLoop(checker *libuv.Idle) { + eventLoop := (*clientEventLoop)((*libuv.Handle)(c.Pointer(checker)).GetData()) + + // The polling state machine! Poll all ready tasks and act on them... 
+ task := eventLoop.exec.Poll() + for task != nil { + if debugSwitch { + println("############### polling") + } + eventLoop.handleTask(task) + task = eventLoop.exec.Poll() + } +} + +func (eventLoop *clientEventLoop) handleTask(task *hyper.Task) { + taskData := (*taskData)(task.Userdata()) + if taskData == nil { + // A background task for hyper_client completed... + task.Free() + return + } + var err error + pc := taskData.pc + // If original taskId is set, we need to check it + err = checkTaskType(task, taskData) + if err != nil { + if debugSwitch { + println("############### handleTask: checkTaskType err != nil") + } + closeAndRemoveIdleConn(pc, true) + return + } + switch taskData.taskId { + case handshake: + if debugReadWriteLoop { + println("############### write") + } + + // Check if the connection is closed + select { + case <-pc.closech: + task.Free() + return + default: + } + + pc.client = (*hyper.ClientConn)(task.Value()) + task.Free() + + // TODO(hah) Proxy(writeLoop) + taskData.taskId = read + err = taskData.req.Request.write(pc.client, taskData, eventLoop.exec) + + if err != nil { + //pc.writeErrCh <- err // to the body reader, which might recycle us + taskData.writeErrCh <- err // to the roundTrip function + if debugSwitch { + println("############### handleTask: write err != nil") + } + pc.close(err) + return + } + + if debugReadWriteLoop { + println("############### write end") + } + case read: + if debugReadWriteLoop { + println("############### read") + } + + pc.tryPutIdleConn = func() bool { + if err := pc.t.tryPutIdleConn(pc); err != nil { + pc.closeErr = err + //if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled { + // trace.PutIdleConn(err) + //} + return false + } + //if trace != nil && trace.PutIdleConn != nil { + // trace.PutIdleConn(nil) + //} + return true + } + + // Take the results + hyperResp := (*hyper.Response)(task.Value()) + task.Free() + + //pc.mu.Lock() + if pc.numExpectedResponses == 0 { + 
pc.readLoopPeekFailLocked(hyperResp, err) + pc.mu.Unlock() + if debugSwitch { + println("############### handleTask: numExpectedResponses == 0") + } + closeAndRemoveIdleConn(pc, true) + return + } + //pc.mu.Unlock() + + var resp *Response + if err == nil { + pc.chunkAsync.SetData(c.Pointer(taskData)) + bc := newBodyChunk(pc.chunkAsync) + pc.bodyChunk = bc + resp, err = ReadResponse(bc, taskData.req.Request, hyperResp) + taskData.hyperBody = hyperResp.Body() + } else { + err = transportReadFromServerError{err} + pc.closeErr = err + } + + // No longer need the response + hyperResp.Free() + + if err != nil { + pc.bodyChunk.closeWithError(err) + taskData.closeHyperBody() + select { + case taskData.resc <- responseAndError{err: err}: + case <-taskData.callerGone: + if debugSwitch { + println("############### handleTask read: callerGone") + } + closeAndRemoveIdleConn(pc, true) + return + } + if debugSwitch { + println("############### handleTask: read err != nil") + } + closeAndRemoveIdleConn(pc, true) + return + } + + taskData.taskId = readBodyChunk + + if !taskData.req.deadline.IsZero() { + (*timeoutData)((*libuv.Handle)(c.Pointer(taskData.req.timer)).GetData()).taskData = taskData + } + + //pc.mu.Lock() + pc.numExpectedResponses-- + //pc.mu.Unlock() + + needContinue := resp.checkRespBody(taskData) + if needContinue { + return + } + + resp.wrapRespBody(taskData) + + select { + case taskData.resc <- responseAndError{res: resp}: + case <-taskData.callerGone: + // defer + if debugSwitch { + println("############### handleTask read: callerGone 2") + } + pc.bodyChunk.Close() + taskData.closeHyperBody() + closeAndRemoveIdleConn(pc, true) + return + } + + if debugReadWriteLoop { + println("############### read end") + } + case readBodyChunk: + if debugReadWriteLoop { + println("############### readBodyChunk") + } + + taskType := task.Type() + if taskType == hyper.TaskBuf { + chunk := (*hyper.Buf)(task.Value()) + chunkLen := chunk.Len() + bytes := unsafe.Slice(chunk.Bytes(), 
chunkLen) + // Free chunk and task + chunk.Free() + task.Free() + // Write to the channel + pc.bodyChunk.readCh <- bytes + if debugReadWriteLoop { + println("############### readBodyChunk end [buf]") + } + return + } + + // taskType == taskEmpty (check in checkTaskType) + task.Free() + pc.bodyChunk.closeWithError(io.EOF) + taskData.closeHyperBody() + replaced := pc.t.replaceReqCanceler(taskData.req.cancelKey, nil) // before pc might return to idle pool + pc.alive = pc.alive && + replaced && pc.tryPutIdleConn() + + if debugSwitch { + println("############### handleTask readBodyChunk: alive: ", pc.alive) + } + closeAndRemoveIdleConn(pc, false) + + if debugReadWriteLoop { + println("############### readBodyChunk end [empty]") + } + } +} + +func readyToRead(aysnc *libuv.Async) { + taskData := (*taskData)(aysnc.GetData()) + dataTask := taskData.hyperBody.Data() + dataTask.SetUserdata(c.Pointer(taskData), nil) + taskData.pc.eventLoop.exec.Push(dataTask) +} + +// closeAndRemoveIdleConn Replace the defer function of readLoop in stdlib +func closeAndRemoveIdleConn(pc *persistConn, force bool) { + if pc.alive == true && !force { + return + } + if debugSwitch { + println("############### closeAndRemoveIdleConn, force:", force) + } + pc.close(pc.closeErr) + pc.t.removeIdleConn(pc) +} + +// ---------------------------------------------------------- + +type connData struct { + tcpHandle libuv.Tcp + connectReq libuv.Connect + readBuf libuv.Buf + readBufFilled uintptr + nwrite int64 // bytes written(Replaced from persistConn's nwrite) + readWaker *hyper.Waker + writeWaker *hyper.Waker + isClosing atomic.Bool +} + +type taskData struct { + taskId taskId + req *transportRequest + pc *persistConn + addedGzip bool + writeErrCh chan error + callerGone chan struct{} + resc chan responseAndError + hyperBody *hyper.Body +} + +// taskId The unique identifier of the next task polled from the executor +type taskId c.Int + +const ( + handshake taskId = iota + 1 + read + readBodyChunk +) + 
+func (conn *connData) Close() { + if conn != nil && !conn.isClosing.Swap(true) { + if conn.readWaker != nil { + conn.readWaker.Free() + conn.readWaker = nil + } + if conn.writeWaker != nil { + conn.writeWaker.Free() + conn.writeWaker = nil + } + //if conn.readBuf.Base != nil { + // c.Free(c.Pointer(conn.readBuf.Base)) + // conn.readBuf.Base = nil + //} + if (*libuv.Handle)(c.Pointer(&conn.tcpHandle)).IsClosing() == 0 { + (*libuv.Handle)(c.Pointer(&conn.tcpHandle)).Close(nil) + } + conn = nil + } +} + +func (d *taskData) closeHyperBody() { + if d.hyperBody != nil { + d.hyperBody.Free() + d.hyperBody = nil + } +} + +// onConnect is the libuv callback for a successful connection +func onConnect(req *libuv.Connect, status c.Int) { + if debugSwitch { + println("############### connect start") + defer println("############### connect end") + } + conn := (*connData)((*libuv.Req)(c.Pointer(req)).GetData()) + if status < 0 { + c.Fprintf(c.Stderr, c.Str("connect error: %s\n"), c.GoString(libuv.Strerror(libuv.Errno(status)))) + conn.Close() + return + } + + // Keep-Alive + conn.tcpHandle.KeepAlive(1, 60) + + (*libuv.Stream)(c.Pointer(&conn.tcpHandle)).StartRead(allocBuffer, onRead) +} + +// allocBuffer allocates a buffer for reading from a socket +func allocBuffer(handle *libuv.Handle, suggestedSize uintptr, buf *libuv.Buf) { + conn := (*connData)(handle.GetData()) + if conn.readBuf.Base == nil { + //conn.readBuf = libuv.InitBuf((*c.Char)(c.Malloc(suggestedSize)), c.Uint(suggestedSize)) + base := make([]byte, suggestedSize) + conn.readBuf = libuv.InitBuf((*c.Char)(c.Pointer(&base[0])), c.Uint(suggestedSize)) + conn.readBufFilled = 0 + } + *buf = libuv.InitBuf((*c.Char)(c.Pointer(uintptr(c.Pointer(conn.readBuf.Base))+conn.readBufFilled)), c.Uint(suggestedSize-conn.readBufFilled)) +} + +// onRead is the libuv callback for reading from a socket +// This callback function is called when data is available to be read +func onRead(stream *libuv.Stream, nread c.Long, buf *libuv.Buf) 
{ + conn := (*connData)((*libuv.Handle)(c.Pointer(stream)).GetData()) + if nread > 0 { + conn.readBufFilled += uintptr(nread) + } + if conn.readWaker != nil { + // Wake up the pending read operation of Hyper + conn.readWaker.Wake() + conn.readWaker = nil + } +} + +// readCallBack read callback function for Hyper library +func readCallBack(userdata c.Pointer, ctx *hyper.Context, buf *uint8, bufLen uintptr) uintptr { + conn := (*connData)(userdata) + if conn.readBufFilled > 0 { + var toCopy uintptr + if bufLen < conn.readBufFilled { + toCopy = bufLen + } else { + toCopy = conn.readBufFilled + } + // Copy data from read buffer to Hyper's buffer + c.Memcpy(c.Pointer(buf), c.Pointer(conn.readBuf.Base), toCopy) + // Move remaining data to the beginning of the buffer + c.Memmove(c.Pointer(conn.readBuf.Base), c.Pointer(uintptr(c.Pointer(conn.readBuf.Base))+toCopy), conn.readBufFilled-toCopy) + // Update the amount of filled buffer + conn.readBufFilled -= toCopy + return toCopy + } + + if conn.readWaker != nil { + conn.readWaker.Free() + } + conn.readWaker = ctx.Waker() + println("############### readCallBack: IoPending") + return hyper.IoPending +} + +// onWrite is the libuv callback for writing to a socket +// Callback function called after a write operation completes +func onWrite(req *libuv.Write, status c.Int) { + conn := (*connData)((*libuv.Req)(c.Pointer(req)).GetData()) + if conn.writeWaker != nil { + // Wake up the pending write operation + conn.writeWaker.Wake() + conn.writeWaker = nil + } +} + +// writeCallBack write callback function for Hyper library +func writeCallBack(userdata c.Pointer, ctx *hyper.Context, buf *uint8, bufLen uintptr) uintptr { + conn := (*connData)(userdata) + initBuf := libuv.InitBuf((*c.Char)(c.Pointer(buf)), c.Uint(bufLen)) + req := &libuv.Write{} + (*libuv.Req)(c.Pointer(req)).SetData(c.Pointer(conn)) + + ret := req.Write((*libuv.Stream)(c.Pointer(&conn.tcpHandle)), &initBuf, 1, onWrite) + if ret >= 0 { + conn.nwrite += int64(bufLen) + 
return bufLen + } + + if conn.writeWaker != nil { + conn.writeWaker.Free() + } + conn.writeWaker = ctx.Waker() + println("############### writeCallBack: IoPending") + return hyper.IoPending +} + +// onTimeout is the libuv callback for a timeout +func onTimeout(timer *libuv.Timer) { + if debugSwitch { + println("############### onTimeout start") + defer println("############### onTimeout end") + } + data := (*timeoutData)((*libuv.Handle)(c.Pointer(timer)).GetData()) + close(data.timeoutch) + timer.Stop() + + taskData := data.taskData + if taskData != nil { + pc := taskData.pc + pc.alive = false + pc.t.cancelRequest(taskData.req.cancelKey, errors.New("timeout: req.Context().Err()")) + closeAndRemoveIdleConn(pc, true) + } +} + +// newHyperIo creates a new IO with read and write callbacks +func newHyperIo(connData *connData) *hyper.Io { + hyperIo := hyper.NewIo() + hyperIo.SetUserdata(c.Pointer(connData), nil) + hyperIo.SetRead(readCallBack) + hyperIo.SetWrite(writeCallBack) + return hyperIo +} + +// checkTaskType checks the task type +func checkTaskType(task *hyper.Task, taskData *taskData) (err error) { + curTaskId := taskData.taskId + taskType := task.Type() + if taskType == hyper.TaskError { + err = fail((*hyper.Error)(task.Value()), curTaskId) + } + if err == nil { + switch curTaskId { + case handshake: + if taskType != hyper.TaskClientConn { + err = errors.New("Unexpected hyper task type: expected to be TaskClientConn, actual is " + strTaskType(taskType)) + } + case read: + if taskType != hyper.TaskResponse { + err = errors.New("Unexpected hyper task type: expected to be TaskResponse, actual is " + strTaskType(taskType)) + } + case readBodyChunk: + if taskType != hyper.TaskBuf && taskType != hyper.TaskEmpty { + err = errors.New("Unexpected hyper task type: expected to be TaskBuf / TaskEmpty, actual is " + strTaskType(taskType)) + } + } + } + if err != nil { + task.Free() + if curTaskId == handshake || curTaskId == read { + taskData.writeErrCh <- err + if 
debugSwitch { + println("############### checkTaskType: writeErrCh") + } + taskData.pc.close(err) + } + if taskData.pc.bodyChunk != nil { + taskData.pc.bodyChunk.Close() + taskData.pc.bodyChunk = nil + } + taskData.closeHyperBody() + taskData.pc.alive = false + } + return +} + +// fail prints the error details and panics +func fail(err *hyper.Error, taskId taskId) error { + if err != nil { + // grab the error details + var errBuf [256]c.Char + errLen := err.Print((*uint8)(c.Pointer(&errBuf[:][0])), uintptr(len(errBuf))) + errDetails := unsafe.SliceData(errBuf[:errLen]) + details := c.GoString(errDetails) + fmt.Println(details) + + // clean up the error + err.Free() + return fmt.Errorf("hyper request error, taskId: %s, details: %s\n", strTaskId(taskId), details) + } + return nil +} + +func strTaskType(taskType hyper.TaskReturnType) string { + switch taskType { + case hyper.TaskClientConn: + return "TaskClientConn" + case hyper.TaskResponse: + return "TaskResponse" + case hyper.TaskBuf: + return "TaskBuf" + case hyper.TaskEmpty: + return "TaskEmpty" + case hyper.TaskError: + return "TaskError" + default: + return "Unknown" + } +} + +func strTaskId(taskId taskId) string { + switch taskId { + case handshake: + return "handshake" + case read: + return "read" + case readBodyChunk: + return "readBodyChunk" + default: + return "notSet" + } +} + +// ---------------------------------------------------------- + +// error values for debugging and testing, not seen by users. 
var (
	errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
	errConnBroken         = errors.New("http: putIdleConn: connection is in bad state")
	errCloseIdle          = errors.New("http: putIdleConn: CloseIdleConnections was called")
	errTooManyIdle        = errors.New("http: putIdleConn: too many idle connections")
	errTooManyIdleHost    = errors.New("http: putIdleConn: too many idle connections for host")
	errCloseIdleConns     = errors.New("http: CloseIdleConnections called")
	errReadLoopExiting    = errors.New("http: Transport.readWriteLoop.read exiting")
	errIdleConnTimeout    = errors.New("http: idle connection timeout")

	// errServerClosedIdle is not seen by users for idempotent requests, but may be
	// seen by a user if the server shuts down an idle connection and sends its FIN
	// in flight with already-written POST body bytes from the client.
	// See https://github.com/golang/go/issues/19943#issuecomment-355607646
	errServerClosedIdle = errors.New("http: server closed idle connection")
)

// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")

// errCannotRewind reports that a request body cannot be replayed after a
// connection loss (no GetBody available to rewind it).
var errCannotRewind = errors.New("net/http: cannot rewind body after connection loss")

// errRequestCanceled is set to be identical to the one from h2 to facilitate
// testing.
var errRequestCanceled = http2errRequestCanceled

// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var http2errRequestCanceled = errors.New("net/http: request canceled")

var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?

// errCallerOwnsConn is an internal sentinel error used when we hand
// off a writable response.Body to the caller. We use this to prevent
// closing a net.Conn that is now owned by the caller.
var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn")

// httpError is a transport-level error carrying a timeout flag, in the
// style of net.Error (Timeout/Temporary methods).
type httpError struct {
	err     string
	timeout bool
}

func (e *httpError) Error() string   { return e.err }
func (e *httpError) Timeout() bool   { return e.timeout }
func (e *httpError) Temporary() bool { return true }

// nothingWrittenError wraps a write errors which ended up writing zero bytes.
type nothingWrittenError struct {
	error
}

// Unwrap exposes the wrapped error so errors.Is/errors.As see through it.
func (nwe nothingWrittenError) Unwrap() error {
	return nwe.error
}

// transportReadFromServerError is used by Transport.readLoop when the
// 1 byte peek read fails and we're actually anticipating a response.
// Usually this is just due to the inherent keep-alive shut down race,
// where the server closed the connection at the same time the client
// wrote. The underlying err field is usually io.EOF or some
// ECONNRESET sort of thing which varies by platform. But it might be
// the user's custom net.Conn.Read error too, so we carry it along for
// them to return from Transport.RoundTrip.
type transportReadFromServerError struct {
	err error
}

func (e transportReadFromServerError) Unwrap() error { return e.err }
func (e transportReadFromServerError) Error() string {
	return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
}

// nop is the no-op default value for the test hooks below.
func nop() {}

// testHooks. Always non-nil.
var (
	testHookEnterRoundTrip   = nop
	testHookWaitResLoop      = nop
	testHookRoundTripRetried = nop
	testHookPrePendingDial   = nop
	testHookPostPendingDial  = nop
)

// portMap maps URL schemes to their default ports; used by canonicalAddr
// when the URL carries no explicit port.
var portMap = map[string]string{
	"http":   "80",
	"https":  "443",
	"socks5": "1080",
}

// idnaASCIIFromURL returns the URL's hostname converted to IDNA ASCII
// (punycode) form; on conversion failure the hostname is used unchanged.
func idnaASCIIFromURL(url *url.URL) string {
	addr := url.Hostname()
	if v, err := idnaASCII(addr); err == nil {
		addr = v
	}
	return addr
}

// canonicalAddr returns url.Host but always with a ":port" suffix.
func canonicalAddr(url *url.URL) string {
	port := url.Port()
	if port == "" {
		// No explicit port: fall back to the scheme's default from portMap.
		port = portMap[url.Scheme]
	}
	return net.JoinHostPort(idnaASCIIFromURL(url), port)
}

// persistConn wraps a connection, usually a persistent one
// (but may be used for non-keep-alive requests as well)
type persistConn struct {
	// alt optionally specifies the TLS NextProto RoundTripper.
	// This is used for HTTP/2 today and future protocols later.
	// If it's non-nil, the rest of the fields are unused.
	alt RoundTripper

	t         *Transport
	eventLoop *clientEventLoop

	cacheKey connectMethodKey
	conn     *connData
	//tlsState *tls.ConnectionState
	//nwrite int64 // bytes written(Replaced by connData.nwrite)
	closech chan struct{} // closed when conn closed
	isProxy bool

	writeLoopDone chan struct{} // closed when readWriteLoop ends

	// Both guarded by Transport.idleMu:
	idleAt    time.Time    // time it last become idle
	idleTimer *libuv.Timer // holding an onIdleConnTimeout to close it

	mu                   sync.Mutex // guards following fields
	numExpectedResponses int
	closed               error // set non-nil when conn is closed, before closech is closed
	canceledErr          error // set non-nil if conn is canceled
	broken               bool  // an error has happened on this connection; marked broken so it's not reused.
	// mutateHeaderFunc is an optional func to modify extra
	// headers on each outbound request before it's written. (the
	// original Request given to RoundTrip is not modified)
	reused           bool // whether conn has had successful request/response and is being reused.
	mutateHeaderFunc func(Header)

	// other
	alive          bool              // Replace the alive in readLoop
	closeErr       error             // Replace the closeErr in readLoop
	tryPutIdleConn func() bool       // Replace the tryPutIdleConn in readLoop
	client         *hyper.ClientConn // http long connection client handle
	bodyChunk      *bodyChunk        // Implement non-blocking consumption of each responseBody chunk
	chunkAsync     *libuv.Async      // Notifying that the received chunk has been read
}

// CloseIdleConnections closes any connections which were previously
// connected from previous requests but are now sitting idle in
// a "keep-alive" state. It does not interrupt any connections currently
// in use.
func (t *Transport) CloseIdleConnections() {
	if debugSwitch {
		println("############### CloseIdleConnections")
	}
	//t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
	// Snapshot and clear the idle set under idleMu, then close outside
	// the lock to avoid holding idleMu during per-connection teardown.
	t.idleMu.Lock()
	m := t.idleConn
	t.idleConn = nil
	t.closeIdle = true // close newly idle connections
	t.idleLRU = connLRU{}
	t.idleMu.Unlock()
	for _, conns := range m {
		for _, pconn := range conns {
			pconn.close(errCloseIdleConns)
		}
	}

	//if t2 := t.h2transport; t2 != nil {
	//	t2.CloseIdleConnections()
	//}
}

// cancelRequest records the caller's cancellation error and closes the
// connection with errRequestCanceled.
func (pc *persistConn) cancelRequest(err error) {
	if debugSwitch {
		println("############### cancelRequest")
	}
	pc.mu.Lock()
	defer pc.mu.Unlock()
	pc.canceledErr = err
	pc.closeLocked(errRequestCanceled)
}

// close closes the underlying TCP connection and closes
// the pc.closech channel.
//
// The provided err is only for testing and debugging; in normal
// circumstances it should never be seen by users.
func (pc *persistConn) close(err error) {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	pc.closeLocked(err)
}

// markReused marks this connection as having been successfully used for a
// request and response.
func (pc *persistConn) markReused() {
	pc.mu.Lock()
	pc.reused = true
	pc.mu.Unlock()
}

// closeLocked is the lock-held implementation of close/cancelRequest.
// It is idempotent: only the first call (pc.closed == nil) performs the
// teardown. Caller must hold pc.mu.
func (pc *persistConn) closeLocked(err error) {
	if debugSwitch {
		println("############### pc closed")
	}
	if err == nil {
		panic("nil error")
	}
	pc.broken = true
	if pc.closed == nil {
		pc.closed = err
		pc.t.decConnsPerHost(pc.cacheKey)
		// Close HTTP/1 (pc.alt == nil) connection.
		// HTTP/2 closes its connection itself.
		if pc.alt == nil {
			if err != errCallerOwnsConn {
				pc.conn.Close()
			}
			close(pc.closech)
			close(pc.writeLoopDone)
			if pc.client != nil {
				pc.client.Free()
				pc.client = nil
			}
			if pc.chunkAsync != nil && pc.chunkAsync.IsClosing() == 0 {
				pc.chunkAsync.Close(nil)
				pc.chunkAsync = nil
			}
		}
	}
	pc.mutateHeaderFunc = nil
}

// mapRoundTripError returns the appropriate error value for
// persistConn.roundTrip.
//
// The provided err is the first error that (*persistConn).roundTrip
// happened to receive from its select statement.
//
// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
// started writing the request.
func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
	if err == nil {
		return nil
	}

	// Wait for the writeLoop goroutine to terminate to avoid data
	// races on callers who mutate the request on failure.
	//
	// When resc in pc.roundTrip and hence rc.ch receives a responseAndError
	// with a non-nil error it implies that the persistConn is either closed
	// or closing. Waiting on pc.writeLoopDone is hence safe as all callers
	// close closech which in turn ensures writeLoop returns.
	<-pc.writeLoopDone

	// If the request was canceled, that's better than network
	// failures that were likely the result of tearing down the
	// connection.
	if cerr := pc.canceled(); cerr != nil {
		return cerr
	}

	// See if an error was set explicitly.
	req.mu.Lock()
	reqErr := req.err
	req.mu.Unlock()
	if reqErr != nil {
		return reqErr
	}

	if err == errServerClosedIdle {
		// Don't decorate
		return err
	}

	if _, ok := err.(transportReadFromServerError); ok {
		if pc.conn.nwrite == startBytesWritten {
			return nothingWrittenError{err}
		}
		// Don't decorate
		return err
	}
	if pc.isBroken() {
		if pc.conn.nwrite == startBytesWritten {
			return nothingWrittenError{err}
		}
		return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %w", err)
	}
	return err
}

// canceled returns non-nil if the connection was closed due to
// CancelRequest or due to context cancellation.
func (pc *persistConn) canceled() error {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	return pc.canceledErr
}

// isReused reports whether this connection has been used before.
func (pc *persistConn) isReused() bool {
	pc.mu.Lock()
	r := pc.reused
	pc.mu.Unlock()
	return r
}

// isBroken reports whether this connection is in a known broken state.
func (pc *persistConn) isBroken() bool {
	pc.mu.Lock()
	b := pc.closed != nil
	pc.mu.Unlock()
	return b
}

// shouldRetryRequest reports whether we should retry sending a failed
// HTTP request on a new connection. The non-nil input error is the
// error from roundTrip.
func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
	if http2isNoCachedConnError(err) {
		// Issue 16582: if the user started a bunch of
		// requests at once, they can all pick the same conn
		// and violate the server's max concurrent streams.
		// Instead, match the HTTP/1 behavior for now and dial
		// again to get a new TCP connection, rather than failing
		// this request.
		return true
	}
	if err == errMissingHost {
		// User error.
		return false
	}
	if !pc.isReused() {
		// This was a fresh connection. There's no reason the server
		// should've hung up on us.
		//
		// Also, if we retried now, we could loop forever
		// creating new connections and retrying if the server
		// is just hanging up on us because it doesn't like
		// our request (as opposed to sending an error).
		return false
	}
	if _, ok := err.(nothingWrittenError); ok {
		// We never wrote anything, so it's safe to retry, if there's no body or we
		// can "rewind" the body with GetBody.
		return req.outgoingLength() == 0 || req.GetBody != nil
	}
	if !req.isReplayable() {
		// Don't retry non-idempotent requests.
		return false
	}
	if _, ok := err.(transportReadFromServerError); ok {
		// We got some non-EOF net.Conn.Read failure reading
		// the 1st response byte from the server.
		return true
	}
	// The server replied with io.EOF while we were trying to
	// read the response. Probably an unfortunately keep-alive
	// timeout, just as the client was writing a request.
	// conservatively return false.
	return err == errServerClosedIdle
}

// closeConnIfStillIdle closes the connection if it's still sitting idle.
// This is what's called by the persistConn's idleTimer, and is run in its
// own goroutine.
// It returns false when the idle lock could not be acquired (TryLock
// failed), in which case nothing was done.
func (pc *persistConn) closeConnIfStillIdle() bool {
	t := pc.t
	isLock := t.idleMu.TryLock()
	if isLock {
		defer t.idleMu.Unlock()
		pc.closeConnIfStillIdleLocked()
		return true
	}
	return false
}

// closeConnIfStillIdleLocked is the idleMu-held body of closeConnIfStillIdle.
func (pc *persistConn) closeConnIfStillIdleLocked() {
	t := pc.t
	if _, ok := t.idleLRU.m[pc]; !ok {
		// Not idle.
		return
	}
	t.removeIdleConnLocked(pc)
	if debugSwitch {
		println("############### closeConnIfStillIdleLocked")
	}
	pc.close(errIdleConnTimeout)
}

// readLoopPeekFailLocked closes the connection after a failed response
// read, mapping a 408 response to errServerClosedIdle. Caller must hold
// pc.mu.
func (pc *persistConn) readLoopPeekFailLocked(resp *hyper.Response, err error) {
	if debugSwitch {
		println("############### readLoopPeekFailLocked")
	}
	if pc.closed != nil {
		return
	}
	if is408Message(resp) {
		pc.closeLocked(errServerClosedIdle)
		return
	}
	pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %w", err))
}

// setExtraHeaders Set extra headers, such as Accept-Encoding, Connection(Keep-Alive).
// It returns whether gzip was requested on the caller's behalf (currently
// always false; see the TODO below).
func (pc *persistConn) setExtraHeaders(req *transportRequest) bool {
	// Ask for a compressed version if the caller didn't set their
	// own value for Accept-Encoding. We only attempt to
	// uncompress the gzip stream if we were the layer that
	// requested it.
	requestedGzip := false
	// TODO(hah) gzip(pc.roundTrip): The compress/gzip library still has a bug. An exception occurs when calling gzip.NewReader().
	//if !pc.t.DisableCompression &&
	//	req.Header.Get("Accept-Encoding") == "" &&
	//	req.Header.Get("Range") == "" &&
	//	req.Method != "HEAD" {
	//	// Request gzip only, not deflate. Deflate is ambiguous and
	//	// not as universally supported anyway.
	//	// See: https://zlib.net/zlib_faq.html#faq39
	//	//
	//	// Note that we don't request this for HEAD requests,
	//	// due to a bug in nginx:
	//	//   https://trac.nginx.org/nginx/ticket/358
	//	//   https://golang.org/issue/5522
	//	//
	//	// We don't request gzip if the request is for a range, since
	//	// auto-decoding a portion of a gzipped document will just fail
	//	// anyway. See https://golang.org/issue/8923
	//	requestedGzip = true
	//	req.extraHeaders().Set("Accept-Encoding", "gzip")
	//}

	// The 100-continue operation in Hyper is handled in the newHyperRequest function.

	// Keep-Alive
	if pc.t.DisableKeepAlives &&
		!req.wantsClose() &&
		!isProtocolSwitchHeader(req.Header) {
		req.extraHeaders().Set("Connection", "close")
	}
	return requestedGzip
}

// is408Message reports whether the response looks like an HTTP/1.x
// 408 Request Timeout, which servers send when closing an idle connection.
func is408Message(resp *hyper.Response) bool {
	httpVersion := int(resp.Version())
	if httpVersion != 10 && httpVersion != 11 {
		return false
	}
	return resp.Status() == 408
}

// isNoCachedConnError reports whether err is of type noCachedConnError
// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
// may coexist in the same running program.
func http2isNoCachedConnError(err error) bool { // h2_bundle.go
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}

// connectMethod is the map key (in its String form) for keeping persistent
// TCP connections alive for subsequent HTTP requests.
//
// A connect method may be of the following types:
//
//	connectMethod.key().String()      Description
//	------------------------------    -------------------------
//	|http|foo.com                     http directly to server, no proxy
//	|https|foo.com                    https directly to server, no proxy
//	|https,h1|foo.com                 https directly to server w/o HTTP/2, no proxy
//	http://proxy.com|https|foo.com    http to proxy, then CONNECT to foo.com
//	http://proxy.com|http             http to proxy, http to anywhere after that
//	socks5://proxy.com|http|foo.com   socks5 to proxy, then http to foo.com
//	socks5://proxy.com|https|foo.com  socks5 to proxy, then https to foo.com
//	https://proxy.com|https|foo.com   https to proxy, then CONNECT to foo.com
//	https://proxy.com|http            https to proxy, http to anywhere after that
type connectMethod struct {
	_            incomparable
	proxyURL     *url.URL // nil for no proxy, else full proxy URL
	targetScheme string   // "http" or "https"
	// If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
	// then targetAddr is not included in the connect method key, because the socket can
	// be reused for different targetAddr values.
	targetAddr string
	onlyH1     bool // whether to disable HTTP/2 and force HTTP/1

	eventLoop *clientEventLoop
}

// connectMethodKey is the map key version of connectMethod, with a
// stringified proxy URL (or the empty string) instead of a pointer to
// a URL.
type connectMethodKey struct {
	proxy, scheme, addr string
	onlyH1              bool
}

// key builds the cache key for this connect method; see the comment on
// connectMethod.targetAddr for when the target address is elided.
func (cm *connectMethod) key() connectMethodKey {
	proxyStr := ""
	targetAddr := cm.targetAddr
	if cm.proxyURL != nil {
		proxyStr = cm.proxyURL.String()
		if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
			targetAddr = ""
		}
	}
	return connectMethodKey{
		proxy:  proxyStr,
		scheme: cm.targetScheme,
		addr:   targetAddr,
		onlyH1: cm.onlyH1,
	}
}

// scheme returns the first hop scheme: http, https, or socks5
func (cm *connectMethod) scheme() string {
	if cm.proxyURL != nil {
		return cm.proxyURL.Scheme
	}
	return cm.targetScheme
}

// addr returns the first hop "host:port" to which we need to TCP connect.
func (cm *connectMethod) addr() string {
	if cm.proxyURL != nil {
		return canonicalAddr(cm.proxyURL)
	}
	return cm.targetAddr
}

// proxyAuth returns the Proxy-Authorization header to set
// on requests, if applicable.
func (cm *connectMethod) proxyAuth() string {
	if cm.proxyURL == nil {
		return ""
	}
	if u := cm.proxyURL.User; u != nil {
		username := u.Username()
		password, _ := u.Password()
		return "Basic " + basicAuth(username, password)
	}
	return ""
}

// A wantConn records state about a wanted connection
// (that is, an active call to getConn).
// The conn may be gotten by dialing or by finding an idle connection,
// or a cancellation may make the conn no longer wanted.
// These three options are racing against each other and use
// wantConn to coordinate and agree about the winning outcome.
+type wantConn struct { + cm connectMethod + key connectMethodKey // cm.key() + ctx context.Context // context for dial + timeoutch chan struct{} // tmp timeout to replace ctx + ready bool + //ready chan struct{} // closed when pc, err pair is delivered + + // hooks for testing to know when dials are done + // beforeDial is called in the getConn goroutine when the dial is queued. + // afterDial is called when the dial is completed or canceled. + beforeDial func() + afterDial func() + + mu sync.Mutex // protects pc, err, close(ready) + pc *persistConn + err error +} + +// cancel marks w as no longer wanting a result (for example, due to cancellation). +// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn. +func (w *wantConn) cancel(t *Transport, err error) { + w.mu.Lock() + if w.pc == nil && w.err == nil { + w.ready = true // catch misbehavior in future delivery + } + pc := w.pc + w.pc = nil + w.err = err + w.mu.Unlock() + + if pc != nil { + t.putOrCloseIdleConn(pc) + } +} + +// waiting reports whether w is still waiting for an answer (connection or error). +func (w *wantConn) waiting() bool { + if w.ready { + return false + } else { + return true + } +} + +// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded. +func (w *wantConn) tryDeliver(pc *persistConn, err error) bool { + w.mu.Lock() + defer w.mu.Unlock() + + if w.pc != nil || w.err != nil { + return false + } + + w.pc = pc + w.err = err + if w.pc == nil && w.err == nil { + panic("net/http: internal error: misuse of tryDeliver") + } + w.ready = true + return true +} + +// A wantConnQueue is a queue of wantConns. +type wantConnQueue struct { + // This is a queue, not a deque. + // It is split into two stages - head[headPos:] and tail. + // popFront is trivial (headPos++) on the first stage, and + // pushBack is trivial (append) on the second stage. 
+ // If the first stage is empty, popFront can swap the + // first and second stages to remedy the situation. + // + // This two-stage split is analogous to the use of two lists + // in Okasaki's purely functional queue but without the + // overhead of reversing the list when swapping stages. + head []*wantConn + headPos int + tail []*wantConn +} + +// len returns the number of items in the queue. +func (q *wantConnQueue) len() int { + return len(q.head) - q.headPos + len(q.tail) +} + +// pushBack adds w to the back of the queue. +func (q *wantConnQueue) pushBack(w *wantConn) { + q.tail = append(q.tail, w) +} + +// popFront removes and returns the wantConn at the front of the queue. +func (q *wantConnQueue) popFront() *wantConn { + if q.headPos >= len(q.head) { + if len(q.tail) == 0 { + return nil + } + // Pick up tail as new head, clear tail. + q.head, q.headPos, q.tail = q.tail, 0, q.head[:0] + } + w := q.head[q.headPos] + q.head[q.headPos] = nil + q.headPos++ + return w +} + +// peekFront returns the wantConn at the front of the queue without removing it. +func (q *wantConnQueue) peekFront() *wantConn { + if q.headPos < len(q.head) { + return q.head[q.headPos] + } + if len(q.tail) > 0 { + return q.tail[0] + } + return nil +} + +// cleanFront pops any wantConns that are no longer waiting from the head of the +// queue, reporting whether any were popped. +func (q *wantConnQueue) cleanFront() (cleaned bool) { + for { + w := q.peekFront() + if w == nil || w.waiting() { + return cleaned + } + q.popFront() + cleaned = true + } +} + +type connLRU struct { + ll *list.List // list.Element.Value type is of *persistConn + m map[*persistConn]*list.Element +} + +// add adds pc to the head of the linked list. 
+func (cl *connLRU) add(pc *persistConn) { + if cl.ll == nil { + cl.ll = list.New() + cl.m = make(map[*persistConn]*list.Element) + } + ele := cl.ll.PushFront(pc) + if _, ok := cl.m[pc]; ok { + panic("persistConn was already in LRU") + } + cl.m[pc] = ele +} + +func (cl *connLRU) removeOldest() *persistConn { + ele := cl.ll.Back() + pc := ele.Value.(*persistConn) + cl.ll.Remove(ele) + delete(cl.m, pc) + return pc +} + +// remove removes pc from cl. +func (cl *connLRU) remove(pc *persistConn) { + if ele, ok := cl.m[pc]; ok { + cl.ll.Remove(ele) + delete(cl.m, pc) + } +} + +// len returns the number of items in the cache. +func (cl *connLRU) len() int { + return len(cl.m) +} diff --git a/x/net/http/util.go b/x/net/http/util.go new file mode 100644 index 0000000..bfd9fc3 --- /dev/null +++ b/x/net/http/util.go @@ -0,0 +1,356 @@ +package http + +import ( + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/net/idna" + + "github.com/goplus/llgoexamples/x/net" +) + +/** + * Copied from the libraries that llgo cannot be used + */ + +var isTokenTable = [127]bool{ // httpguts.isTokenTable + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, 
+ 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { // httpguts.IsTokenRune + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { // httpguts.ValidHeaderFieldName + if len(v) == 0 { + return false + } + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { + return false + } + } + return true +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. 
Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { // httpguts.ValidHeaderFieldValue + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } // httpguts.isLWS + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// +// CTL = +func isCTL(b byte) bool { // httpguts.isCTL + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { // httpguts.HeaderValuesContainsToken + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. 
+func headerValueContainsToken(v string, token string) bool { // httpguts.headerValueContainsToken + for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') { + if tokenEqual(trimOWS(v[:comma]), token) { + return true + } + v = v[comma+1:] + } + return tokenEqual(trimOWS(v), token) +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { // httpguts.PunycodeHostPort + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} + +func isASCII(s string) bool { // httpguts.isASCII + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { // httpguts.ValidHostHeader + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. 
var validHostByte = [256]bool{ // httpguts.validHostByte
	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
	'8': true, '9': true,

	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
	'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
	'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,

	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
	'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
	'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
	'Y': true, 'Z': true,

	'!':  true, // sub-delims
	'$':  true, // sub-delims
	'%':  true, // pct-encoded (and used in IPv6 zones)
	'&':  true, // sub-delims
	'(':  true, // sub-delims
	')':  true, // sub-delims
	'*':  true, // sub-delims
	'+':  true, // sub-delims
	',':  true, // sub-delims
	'-':  true, // unreserved
	'.':  true, // unreserved
	':':  true, // IPv6address + Host expression's optional port
	';':  true, // sub-delims
	'=':  true, // sub-delims
	'[':  true,
	'\'': true, // sub-delims
	']':  true,
	'_':  true, // unreserved
	'~':  true, // unreserved
}

// IsPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func IsPrint(s string) bool { // ascii.IsPrint
	for i := 0; i < len(s); i++ {
		if c := s[i]; c < ' ' || c > '~' {
			return false
		}
	}
	return true
}

// ToLower returns the lowercase version of s if s is ASCII and printable.
func ToLower(s string) (lower string, ok bool) { // ascii.ToLower
	if !IsPrint(s) {
		return "", false
	}
	return strings.ToLower(s), true
}

// EqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
+func EqualFold(s, t string) bool { // ascii.EqualFold + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { // ascii.lower + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// Is returns whether s is ASCII. +func Is(s string) bool { // ascii.Is + for i := 0; i < len(s); i++ { + if s[i] > unicode.MaxASCII { + return false + } + } + return true +} diff --git a/x/net/ipsock.go b/x/net/ipsock.go new file mode 100644 index 0000000..55e1b45 --- /dev/null +++ b/x/net/ipsock.go @@ -0,0 +1,95 @@ +package net + +import ( + "unsafe" + + "github.com/goplus/llgo/c" +) + +// JoinHostPort combines host and port into a network address of the +// form "host:port". If host contains a colon, as found in literal +// IPv6 addresses, then JoinHostPort returns "[host]:port". +// +// See func Dial for a description of the host and port parameters. +func JoinHostPort(host, port string) string { + // We assume that host is a literal IPv6 address if host has + // colons. + + if IndexByteString(host, ':') >= 0 { + return "[" + host + "]:" + port + } + return host + ":" + port +} + +// SplitHostPort splits a network address of the form "host:port", +// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or +// host%zone and port. +// +// A literal IPv6 address in hostport must be enclosed in square +// brackets, as in "[::1]:80", "[::1%lo0]:80". +// +// See func Dial for a description of the hostport parameter, and host +// and port results. 
+func SplitHostPort(hostport string) (host, port string, err error) { + const ( + missingPort = "missing port in address" + tooManyColons = "too many colons in address" + ) + addrErr := func(addr, why string) (host, port string, err error) { + return "", "", &AddrError{Err: why, Addr: addr} + } + j, k := 0, 0 + + // The port starts after the last colon. + i := last(hostport, ':') + if i < 0 { + return addrErr(hostport, missingPort) + } + + if hostport[0] == '[' { + // Expect the first ']' just before the last ':'. + end := IndexByteString(hostport, ']') + if end < 0 { + return addrErr(hostport, "missing ']' in address") + } + switch end + 1 { + case len(hostport): + // There can't be a ':' behind the ']' now. + return addrErr(hostport, missingPort) + case i: + // The expected result. + default: + // Either ']' isn't followed by a colon, or it is + // followed by a colon that is not the last one. + if hostport[end+1] == ':' { + return addrErr(hostport, tooManyColons) + } + return addrErr(hostport, missingPort) + } + host = hostport[1:end] + j, k = 1, end+1 // there can't be a '[' resp. 
']' before these positions + } else { + host = hostport[:i] + if IndexByteString(host, ':') >= 0 { + return addrErr(hostport, tooManyColons) + } + } + if IndexByteString(hostport[j:], '[') >= 0 { + return addrErr(hostport, "unexpected '[' in address") + } + if IndexByteString(hostport[k:], ']') >= 0 { + return addrErr(hostport, "unexpected ']' in address") + } + + port = hostport[i+1:] + return host, port, nil +} + +func IndexByteString(s string, ch byte) int { // bytealg.IndexByteString + ptr := unsafe.Pointer(unsafe.StringData(s)) + ret := c.Memchr(ptr, c.Int(ch), uintptr(len(s))) + if ret != nil { + return int(uintptr(ret) - uintptr(ptr)) + } + return -1 +} diff --git a/x/net/net.go b/x/net/net.go new file mode 100644 index 0000000..3267d90 --- /dev/null +++ b/x/net/net.go @@ -0,0 +1,20 @@ +package net + +type AddrError struct { + Err string + Addr string +} + +func (e *AddrError) Error() string { + if e == nil { + return "" + } + s := e.Err + if e.Addr != "" { + s = "address " + e.Addr + ": " + s + } + return s +} + +func (e *AddrError) Timeout() bool { return false } +func (e *AddrError) Temporary() bool { return false } diff --git a/x/net/parse.go b/x/net/parse.go new file mode 100644 index 0000000..c110fcf --- /dev/null +++ b/x/net/parse.go @@ -0,0 +1,12 @@ +package net + +// Index of rightmost occurrence of b in s. +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +}