diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..de1e1701 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,33 @@ +name: CI + +on: + push: + branches: + - master + paths-ignore: + - '**.md' + pull_request: + branches: + - master + paths-ignore: + - '**.md' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + test: + strategy: + matrix: + go: [ '1.22.x', '1.23.x' ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + - name: Test + run: go test ./... -coverprofile=coverage.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..d2bf3553 --- /dev/null +++ b/.gitignore @@ -0,0 +1,34 @@ +# IDE ignore +.idea/ +*.ipr +*.iml +*.iws +.vscode/ + +# Emacs save files +*~ +\#*\# +.\#* + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# make-related metadata +/.make/ + +# temp ignore +*.log +*.cache +*.diff +*.exe +*.exe~ +*.patch +*.tmp +*.swp + +# OSX trash +.DS_Store \ No newline at end of file diff --git a/.testdata/sample-client-key.pem b/.testdata/sample-client-key.pem new file mode 100644 index 00000000..47c6e050 --- /dev/null +++ b/.testdata/sample-client-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA779bgekwF6sVauTdq12ZB+5Say3pq0Aq43fQ8MniYPGDdhPZ +PnI8pMp08tuiSAZIatyX/NozobMs/U/U6aN8Vmbf8ERfXmN5kYLN1585RObyQkwR ++PS9jLUw98fuYnumPExb8hMDPB2QZIBu7oZRrzr3hdyw4/aqv9msyYi5rw/BdwA0 +mt9HqXZAyD23x5oKnEly5Uf/CQXF3BM8Zus05b4Nlig8oKmDPNccew3ly6JLpXHg +THMyKzNmzm2Sko4JsutMEL/TV5GmOjpM0SCG62F85K+X5l0bARNwc9qtoKpzeMcz +CM3g4BCdyDylup7nXyi4VAv4dJ0aPHw8Nc1rpQIDAQABAoIBAC8NQSpH15ZtjzCB +ZjfBkM0LqsU6J4fieghWdX0sQe+Atqovzw0AYoJ88WLQVBMKmJ/QV0vajxOHFKdK +SaDo4vgaDI0c/hKKN0ulfjx5FUY+hQEZ6NURQzogPVIDvPc7CS8AVXM25AWiT7pJ +snvBhLp9OiLdYyH6QRyR3eVXngmLDtAJVz86HW0GIjWLNjApigdMjAQDCE1AlW/H +HuLZbfhUZ2EtcFI98OZfWHPwLchGimIeo/WxciFrzkkkbCyvVyv8QygK0FfhMsh0 +SIWFZxbxx21WsDeDaX75qr+sLH2IzCjazkgDbZTdY9GsLqt8CD9YFzBxazmvH8R0 +de/dFGkCgYEA+er0+wF266kgdA0Js2u4szh0Nz6ljaBqzCLTcKj/mdNnc99oRJJO +1HZET4/hoIN0OTZvIH7/jx7SJSIDVOgSGpAF1wnoHhT/kjnXC6t5MDCRuceZIV76 +EbTAGCMwDz1yAFmTMr6qMXUcOCsBBAtlNKDtiqXshnUfctCKXlw8NP8CgYEA9ZUJ +hw1/g6Bomp7VCtwk+FBQ40SRjvrMrlJWJ39/Ezbp5I9I4DfJzTjQryg5ff+Ggq0/ +sUQrJsr6XF2AyQx1kV0vb5tdIlFjVNCErE/ZzVklss9r8BiSMBAIyqeMFVZWT6kd +pY1/uj1Q+Y5stP9iEWq/r5mmW8N+lJnq5H3Aa1sCgYEAzySfySx9lPapn4bu83fl +ryarrN6P+cNswaZb+pUYxjcjGDekBLIABLnCBPAM4y4RtxoXIaghyk6Rf5WhjU6N +MtcNAB+F9OkSq/Ck/VczK24WWxXFJpPCUcqvLVJ9EySqyP91sim2hye6LBP405Fe +YTDBspm0Yf3SAyg2h9+LR6ECgYBENAz+VfBZBP6oGn5+Up9t2xhr1co7FEouC63j +sFQBaRnSIT0TEEtaVHIYgypcZM/dkPIEcDMvxeV8K3et3mj0YxXegB6AfmwAzRxb +op2RmzWOEG8gsiI/eOSIK7oK3vx/iS8zoDWd6pOHi1eDeP2qaqQrx5ddGtEXwhtr +M8VxywKBgQDUVxNDU5U+Fplr5XyxsmemnwbvzkJW0Iz04yxSyH+WRUtGnJoL6d5v +fYhwL60gFFh3FFWTOiQxlvEOnhpfdqufCcO4PtHYxRMG37faBCb1ewQlcQSZ0n7i +jQlLzfStlPRP8QEVBW/oc4aMDO7CVP77j5g0Wzt7Kuyh0mFYx20alg== +-----END RSA PRIVATE KEY----- diff --git a/.testdata/sample-client.pem b/.testdata/sample-client.pem new file mode 100644 index 00000000..fb029bba --- /dev/null +++ b/.testdata/sample-client.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxzCCAq+gAwIBAgIUGDq1keCeZFfm+Pp7nYCpfUyTltQwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB1NpQ2h1YW4xEDAOBgNVBAcTB0No +ZW5nRHUxDDAKBgNVBAoTA3JlcTELMAkGA1UECxMCQ0ExDDAKBgNVBAMTA3JlcTAg 
+Fw0yMjAyMTAwNTEwMDBaGA8yMTIyMDExNzA1MTAwMFowXjELMAkGA1UEBhMCQ04x +EDAOBgNVBAgTB1NpQ2h1YW4xEDAOBgNVBAcTB0NoZW5nZHUxDDAKBgNVBAoTA3Jl +cTEMMAoGA1UECxMDcmVxMQ8wDQYDVQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDvv1uB6TAXqxVq5N2rXZkH7lJrLemrQCrjd9DwyeJg +8YN2E9k+cjykynTy26JIBkhq3Jf82jOhsyz9T9Tpo3xWZt/wRF9eY3mRgs3XnzlE +5vJCTBH49L2MtTD3x+5ie6Y8TFvyEwM8HZBkgG7uhlGvOveF3LDj9qq/2azJiLmv +D8F3ADSa30epdkDIPbfHmgqcSXLlR/8JBcXcEzxm6zTlvg2WKDygqYM81xx7DeXL +okulceBMczIrM2bObZKSjgmy60wQv9NXkaY6OkzRIIbrYXzkr5fmXRsBE3Bz2q2g +qnN4xzMIzeDgEJ3IPKW6nudfKLhUC/h0nRo8fDw1zWulAgMBAAGjfzB9MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAdBgNVHQ4EFgQUoa8OEGUk7FwiwKrw0NpjiSv9WCwwHwYDVR0jBBgw +FoAUpNtNypX4RfRXBAXxI2MtrodFNMswDQYJKoZIhvcNAQELBQADggEBAAO6nZtS +MPb1PSUAL7pe9ZiCL4VH+ED/RkW+JSi/nJXJisvQJqEqUCw3nkTQik6yT7+Dr8sp +0rUNwxzBNUi7ceHRsThtUcjDXN7vgZfMDU+hC3DssgfmrqtdHefkD7m4MdAJjg9F ++kKOkKmCCzJ2sGMnFhVW3gQDf/PHl4VoZeBEATfqGqxmQROTJuLCpus3/yFxmsu1 +nT1x4K7HLzdWztxCQ/Nq/DpjD0nIMXicBFPnQamv+PHePS5NT+UEBvvbz/747aYI +LG6Jczhl3oK3zjuLAwW5QSsID7CERKclZCy6BuMekAEkQeqL79T0joUUVb5ywhQz +qHRB9DxZiGxazDU= +-----END CERTIFICATE----- diff --git a/.testdata/sample-file.txt b/.testdata/sample-file.txt new file mode 100644 index 00000000..032120aa --- /dev/null +++ b/.testdata/sample-file.txt @@ -0,0 +1,3 @@ +THIS IS A SAMPLE FILE FOR TEST + +https://github.com/imroc/req diff --git a/.testdata/sample-gbk.html b/.testdata/sample-gbk.html new file mode 100644 index 00000000..cc7b22fa --- /dev/null +++ b/.testdata/sample-gbk.html @@ -0,0 +1,353 @@ + + + + + 我是roc + + + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 
大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + 大家好,我是roc + + diff --git a/.testdata/sample-image.png b/.testdata/sample-image.png new file mode 100644 index 00000000..5eee2c93 Binary files /dev/null and b/.testdata/sample-image.png differ diff --git a/.testdata/sample-root.pem b/.testdata/sample-root.pem new file mode 100644 index 00000000..9a3a0648 --- /dev/null +++ b/.testdata/sample-root.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIUfoFqa4MCUjMpXdMg7GQphA4aeDowDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB1NpQ2h1YW4xEDAOBgNVBAcTB0No +ZW5nRHUxDDAKBgNVBAoTA3JlcTELMAkGA1UECxMCQ0ExDDAKBgNVBAMTA3JlcTAe +Fw0yMjAyMTAwNDUwMDBaFw0yNzAyMDkwNDUwMDBaMFoxCzAJBgNVBAYTAkNOMRAw +DgYDVQQIEwdTaUNodWFuMRAwDgYDVQQHEwdDaGVuZ0R1MQwwCgYDVQQKEwNyZXEx +CzAJBgNVBAsTAkNBMQwwCgYDVQQDEwNyZXEwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDrHGzHhcx+3ZyxS0BwAo+jse9587uBpAo+DseSVFPShgDNBjkc +/VpdYIzXJJ5VJGv4+6zeidfh0XGElwi6J+7xJPrZu5Dx4UTD3buNIUDz7BVIhRFJ +fJr9IrsFn4oYPduRK07Ij4ccOWIszdnc6Tk/2r2iEKwtqA/SEOWV/34YE+72K4vD +FR/qepG6lraeCg1FvYlNEg2QDGVXc9Npc735vgh7IpXJAEOuE2hDALKOJg9233Bn +qE0iSk8tXJ5NMB1r4NRnEHGMlpcZf/2ZBC1Lb9clUS3qpDzNRn0RxANoANAQ8iVG +p8ysizgk9k4CnUrwPcNHkoTUvVHZbPFGbzzPAgMBAAGjQjBAMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSk203KlfhF9FcEBfEjYy2u +h0U0yzANBgkqhkiG9w0BAQsFAAOCAQEAeISMnE6KNJ+g5hORssVqeY2Y8rZmA8tm 
+NmJQX5OpmPj4M59BcmfEaurbCm4yprDHJdVJPotEUCnOmENHFkO+xhCSJHbXfT2J +Vb4yuIRBT5UrkKjO/RF3Ca0tYbPgDZZtdb/7VcNfR/qGar9AYEgA8GM8D6y6m1/p +L/523TV06kI9tiS6flmxJtu2ydvpimAkWwtt4sDEp7g+gB+AHt4NUnkzzH7gXB25 +G2cy9mtJ45ah+bX9niCOZOdSFSPKXSGCh6DqDrtKcHUK7noUu7SCH0WdX9A7KKGo +6PfnwBRh2eXJ35BgCcHE4IM/isT+v/QVt1W0hQPE1PVhSNnNIlO0og== +-----END CERTIFICATE----- diff --git a/.testdata/sample-server-key.pem b/.testdata/sample-server-key.pem new file mode 100644 index 00000000..6d76bb3a --- /dev/null +++ b/.testdata/sample-server-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAtAz2shdfO212Pktb//RFHXRi70utH7Xnygz9WN6yQZUg6mbs +W1tFRxUo2JOKm/feTGJ6VohFEIdv7kMTVFCpwSDe31sz9yM1l2D85adJXQuOpiMV +ux264RnyTJIVLhkFVvHyGohWYAwrMGoeV+8lBzTnbhnk0OHujJCdw1iLohc3dodE +JNRXxOB6LZ/3kj1qo1IjQwlRpDXuGRRBNO8GBW8G72QaYTamRP0bvByJ09nuWlxZ +V2ouCLYXRAhbx3BXCj1zEiqEQBAEOhQrooPNHqgrsYapvyVHmWJFDOZaIkGNfHKn +qPtM8RhsD0yqKtmY38voQaowfHKKS6F52JYP+QIDAQABAoIBAHtGUPXYaLygko/O +OvxA+71R/ZcHgk4u1reRMzjQqM2cVEAJHhTipckoZKH8Sq/FAu/bkRWEEX1irbE9 +PZPB8qgnYFEe+bJg6gVuQ1jds65ABngbl3pYvaX3hN0GO/gm62//EZs286SpUDzC +u2nLc9e+UiIhGngl6JVXQp0IF/pusWsnKN34QK81qRjCEInlPLI04o4Imy/3a/zc +a4DwCwMqXbYHyv97nufApQB/1qYjiKcrhhk9AeRr3vZyOFrYec2avQKuWhLrGpRN +koNRIPKrgTKRq3gsSmCiPiSRUD40pmtJujhAJqvcZsqSqyv+J2yPdjjTAyqyV/29 +M98GwoECgYEAwf75kTv+k4INdnJVvMXdWFcxkgzq8/fLmBOGoTNzKj1fHTDZFryD +X41ymNBee26YaUMTMg7NahaaT8s+bk7LKjYFODhD6VVdvfagVchbkB5wK27Cwa0z +XzExLFtnUlE1o/ciZwqUlzLxRXQKlfTHCfiALqBmMIEDK0CRCtxY2rECgYEA7Zj1 +ynolm5jc84yUiHhCKUUVaL0mP9IFV5Dydo+UymzJmjMUx1H7baFbHo2rZ6plkRop +AjH5LZ8cJ06EgGu9rOlHwwKYIA2FKnzwCiFE9nH8BE+ki2mBNAwsj+NtCBTvEBsN +b3byhHMMdrJj6CPgaZZOCAHHp3kBKjNmL4Hoy8kCgYEArrqM5jb3MLzui0Sn3IMK +vkqqpzVjWaJSigLsO70veVgVlyEsJsJcQXARS3pB30LZm9WCMJAMjAUXr88LyCbH +7pkBUoW7BSqSaEr+VsVDUydXOIdmezMZFiAkfiNFiGsEuU4aelyZQSXtEfVWo4H4 +1A4yxcxKvl01EXvyJ6oXjcECgYA8JTJjNRR8FPApvvaCrV6iL9jBkNAz66hqiEi4 +dpRFwdAu9qtV4YzyLZxxWY+ASIQ5fRPQeHIJeHOaB6hHEf8L3GnMFcYIpyOEo+fn +yJA6ipQvSzHuEKEiWcqWCg45s4Lo4tA93TB7Etye132u8BYI5IGQSVMPM/R1iFlf +wVT68QKBgAxY1euChqV+Wio74IaNiHsnk6KzLiUaBOV5i1xA1Kz81mownYI1n9jk +LXTzgTrmJ8jtL0HmaJcl7plIre8h3WAQhHFXWjPhmE+YhwVfPU77JgA1o5Pn7KKm +NDoSb3GDRYHuBaAK5SvBxQ9re6rkueK+N5cdcB7ozt1h1FLFz0/D +-----END RSA PRIVATE KEY----- diff --git a/.testdata/sample-server.pem b/.testdata/sample-server.pem new file mode 100644 index 00000000..81c9effd --- /dev/null +++ b/.testdata/sample-server.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID6DCCAtCgAwIBAgIUPpYORWb7lrYrrfmFRDrugeHzncUwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB1NpQ2h1YW4xEDAOBgNVBAcTB0No +ZW5nRHUxDDAKBgNVBAoTA3JlcTELMAkGA1UECxMCQ0ExDDAKBgNVBAMTA3JlcTAg +Fw0yMjAyMTAwNTA0MDBaGA8yMTIyMDExNzA1MDQwMFowXjELMAkGA1UEBhMCQ04x +EDAOBgNVBAgTB1NpQ2h1YW4xEDAOBgNVBAcTB0NoZW5nZHUxDDAKBgNVBAoTA3Jl +cTEMMAoGA1UECxMDcmVxMQ8wDQYDVQQDEwZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC0DPayF187bXY+S1v/9EUddGLvS60ftefKDP1Y3rJB +lSDqZuxbW0VHFSjYk4qb995MYnpWiEUQh2/uQxNUUKnBIN7fWzP3IzWXYPzlp0ld +C46mIxW7HbrhGfJMkhUuGQVW8fIaiFZgDCswah5X7yUHNOduGeTQ4e6MkJ3DWIui +Fzd2h0Qk1FfE4Hotn/eSPWqjUiNDCVGkNe4ZFEE07wYFbwbvZBphNqZE/Ru8HInT +2e5aXFlXai4IthdECFvHcFcKPXMSKoRAEAQ6FCuig80eqCuxhqm/JUeZYkUM5loi +QY18cqeo+0zxGGwPTKoq2Zjfy+hBqjB8copLoXnYlg/5AgMBAAGjgZ8wgZwwDgYD +VR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSjaTM5PsU7WKjGqHjLG1ShTukoljAfBgNVHSME +GDAWgBSk203KlfhF9FcEBfEjYy2uh0U0yzAdBgNVHREEFjAUgglsb2NhbGhvc3SH +BH8AAAGGASowDQYJKoZIhvcNAQELBQADggEBAMr4rw7MAwCIJNuFxHukWIxIyq6B 
+g2P2kFIU+oWVhKd0VlHZ/lrjR3eB1GJ4lC8n+yslEYA0nipEBZm1zABUKNhRhmam +Oi8Gf09yOg5+NZM7BihgK+AF1Kc2a282XpQOHqqqr20QAeO9RLBwBjxc4koxgPog +HSVgbNceemxFrfT8kzjyjv9SRpeRjeAYLILHxPABRVEuO5rOMoRhZOGMxb65IAuT +aFbOPIdBsW1d2/cx5hQT1yfXXOjFXvKVL3pYkEDq+61E40G8Sfr1ZqLnQs6+Fhiy +vUsHXX7yq6hnyrhVy1wTL0mLqadK+umEybkalnCMHVlusNnLXuf43k9xlFU= +-----END CERTIFICATE----- diff --git a/LICENSE b/LICENSE index 8dada3ed..70f3d40a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,21 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +MIT License + +Copyright (c) 2017-2022 roc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md index bdc77fc9..c8191fd2 100644 --- a/README.md +++ b/README.md @@ -1,302 +1,547 @@ # req -[![GoDoc](https://godoc.org/github.com/imroc/req?status.svg)](https://godoc.org/github.com/imroc/req) -A golang http request library for humans +

+Simple Go HTTP client with Black Magic
+
+[Build Status] [Go Report Card] [License] [GitHub Releases] [Mentioned in Awesome Go]

+## Documentation +Full documentation is available on the official website: https://req.cool. -Features -======== +## Features -- Light weight -- Simple -- Easy play with JSON and XML -- Easy for debug and logging -- Easy file uploads and downloads -- Easy manage cookie -- Easy set up proxy -- Easy set timeout -- Easy customize http client +* **Simple and Powerful**: Simple and easy to use, providing rich client-level and request-level settings, all of which are intuitive and chainable methods. +* **Easy Debugging**: Powerful and convenient debug utilities, including debug logs, performance traces, and even dump the complete request and response content (see [Debugging](https://req.cool/docs/tutorial/debugging/)). +* **Easy API Testing**: API testing can be done with minimal code, no need to explicitly create any Request or Client, or even to handle errors (See [Quick HTTP Test](https://req.cool/docs/tutorial/quick-test/)) +* **Smart by Default**: Detect and decode to utf-8 automatically if possible to avoid garbled characters (See [Auto Decode](https://req.cool/docs/tutorial/auto-decode/)), marshal request body and unmarshal response body automatically according to the Content-Type. +* **Support Multiple HTTP Versions**: Support `HTTP/1.1`, `HTTP/2`, and `HTTP/3`, and can automatically detect the server side and select the optimal HTTP version for requests, you can also force the protocol if you want (See [Force HTTP version](https://req.cool/docs/tutorial/force-http-version/)). +* **Support Retry**: Support automatic request retry and is fully customizable (See [Retry](https://req.cool/docs/tutorial/retry/)). +* **HTTP Fingerprinting**: Support http fingerprint impersonation, so that we can access websites that prohibit crawler programs by identifying http fingerprints (See [HTTP Fingerprint](https://req.cool/docs/tutorial/http-fingerprint/)). +* **Multiple Authentication Methods**: You can use HTTP Basic Auth, Bearer Auth Token and Digest Auth out of box (see [Authentication](https://req.cool/docs/tutorial/authentication/)). +* **Easy Download and Upload**: You can download and upload files with simple request settings, and even set a callback to show real-time progress (See [Download](https://req.cool/docs/tutorial/download/) and [Upload](https://req.cool/docs/tutorial/upload/)). +* **Exportable**: `req.Transport` is exportable. Compared with `http.Transport`, it also supports HTTP3, dump content, middleware, etc. It can directly replace the Transport of `http.Client` in existing projects, and obtain more powerful functions with minimal code change. +* **Extensible**: Support Middleware for Request, Response, Client and Transport (See [Request and Response Middleware](https://req.cool/docs/tutorial/middleware-for-request-and-response/)) and [Client and Transport Middleware](https://req.cool/docs/tutorial/middleware-for-client-and-transport/)). +## Get Started -Document -======== -[涓枃](doc/README_cn.md) +**Install** +You first need [Go](https://go.dev/) installed (version 1.22+ is required), then you can use the below Go command to install req: -Install -======= ``` sh -go get github.com/imroc/req +go get github.com/imroc/req/v3 ``` -Overview -======= -`req` implements a friendly API over Go's existing `net/http` library. - -`Req` and `Resp` are two most important struct, you can think of `Req` as a client that initiate HTTP requests, `Resp` as a information container for the request and response. They all provide simple and convenient APIs that allows you to do a lot of things. 
-``` go -func (r *Req) Post(url string, v ...interface{}) (*Resp, error) -``` - -In most cases, only url is required, others are optional, like headers, params, files or body etc. - -There is a default `Req` object, all of its' public methods are wrapped by the `req` package, so you can also think of `req` package as a `Req` object -``` go -// use Req object to initiate requests. -r := req.New() -r.Get(url) - -// use req package to initiate reqeust. -req.Get(url) +**Import** + +Import req to your code: + +```go +import "github.com/imroc/req/v3" ``` -You can use `req.New()` to create lots of `*Req` as client with independent configuration - -Examples -======= -[Basic](#Basic) -[Set Header](#Set-Header) -[Set Param](#Set-Param) -[Set Body](#Set-Body) -[Debug](#Debug) -[Output Format](#Format) -[ToJSON & ToXML](#ToJSON-ToXML) -[Get *http.Response](#Response) -[Upload](#Upload) -[Download](#Download) -[Cookie](#Cookie) -[Set Timeout](#Set-Timeout) -[Set Proxy](#Set-Proxy) -[Customize Client](#Customize-Client) - -## Basic -``` go -header := req.Header{ - "Accept": "application/json", - "Authorization": "Basic YWRtaW46YWRtaW4=", -} -param := req.Param{ - "name": "imroc", - "cmd": "add", -} -// only url is required, others are optional. -r, err = req.Post("http://foo.bar/api", header, param) -if err != nil { - log.Fatal(err) -} -r.ToJSON(&foo) // response => struct/map -log.Printf("%+v", r) // print info (try it, you may surprise) + +**Basic Usage** + +```bash +# assume the following codes in main.go file +$ cat main.go ``` -## Set Header -Use `req.Header` (it is actually a `map[string]string`) -``` go -authHeader := req.Header{ - "Accept": "application/json", - "Authorization": "Basic YWRtaW46YWRtaW4=", +```go +package main + +import ( + "github.com/imroc/req/v3" +) + +func main() { + req.DevMode() // Treat the package name as a Client, enable development mode + req.MustGet("https://httpbin.org/uuid") // Treat the package name as a Request, send GET request. 
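+	// Note: MustGet is the panic-on-error variant used for quick tests like this;
+	// production code would normally call Get and handle the returned error explicitly.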
+ + req.EnableForceHTTP1() // Force using HTTP/1.1 + req.MustGet("https://httpbin.org/uuid") } -req.Get("https://www.baidu.com", authHeader, req.Header{"User-Agent": "V1.1"}) -``` -use `http.Header` -``` go -header := make(http.Header) -header.Set("Accept", "application/json") -req.Get("https://www.baidu.com", header) ``` -## Set Param -Use `req.Param` (it is actually a `map[string]interface{}`) -``` go -param := req.Param{ - "id": "imroc", - "pwd": "roc", +```bash +$ go run main.go +2022/05/19 10:05:07.920113 DEBUG [req] HTTP/2 GET https://httpbin.org/uuid +:authority: httpbin.org +:method: GET +:path: /uuid +:scheme: https +user-agent: req/v3 (https://github.com/imroc/req/v3) +accept-encoding: gzip + +:status: 200 +date: Thu, 19 May 2022 02:05:08 GMT +content-type: application/json +content-length: 53 +server: gunicorn/19.9.0 +access-control-allow-origin: * +access-control-allow-credentials: true + +{ + "uuid": "bd519208-35d1-4483-ad9f-e1555ae108ba" } -req.Get("http://foo.bar/api", param) // http://foo.bar/api?id=imroc&pwd=roc -req.Post(url, param) // body => id=imroc&pwd=roc -``` -use `req.QueryParam` force to append params to the url (it is also actually a `map[string]interface{}`) -``` go -req.Post("http://foo.bar/api", req.Param{"name": "roc", "age": "22"}, req.QueryParam{"access_token": "fedledGF9Hg9ehTU"}) -/* -POST /api?access_token=fedledGF9Hg9ehTU HTTP/1.1 -Host: foo.bar -User-Agent: Go-http-client/1.1 -Content-Length: 15 -Content-Type: application/x-www-form-urlencoded;charset=UTF-8 + +2022/05/19 10:05:09.340974 DEBUG [req] HTTP/1.1 GET https://httpbin.org/uuid +GET /uuid HTTP/1.1 +Host: httpbin.org +User-Agent: req/v3 (https://github.com/imroc/req/v3) Accept-Encoding: gzip -age=22&name=roc -*/ +HTTP/1.1 200 OK +Date: Thu, 19 May 2022 02:05:09 GMT +Content-Type: application/json +Content-Length: 53 +Connection: keep-alive +Server: gunicorn/19.9.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true + +{ + "uuid": "49b7f916-c6f3-49d4-a6d4-22ae93b71969" +} ``` -## Set Body -Put `string`, `[]byte` and `io.Reader` as body directly. -``` go -req.Post(url, "id=roc&cmd=query") -``` -Put object as xml or json body (add `Content-Type` header automatically) -``` go -req.Post(url, req.BodyJSON(&foo)) -req.Post(url, req.BodyXML(&bar)) -``` +The sample code above is good for quick testing purposes, which use `DevMode()` to see request details, and send requests using global wrapper methods that use the default client behind the scenes to initiate the request. -## Debug -Set global variable `req.Debug` to true, it will print detail infomation for every request. -``` go -req.Debug = true -req.Post("http://localhost/test" "hi") -``` -![post](doc/post.png) +In production, it is recommended to explicitly create a client, and then use the same client to send all requests, please see other examples below. -## Output Format -You can use different kind of output format to log the request and response infomation in your log file in defferent scenarios. For example, use `%+v` output format in the development phase, it allows you to observe the details. Use `%v` or `%-v` output format in production phase, just log the information necessarily. 
+**Videos** -### `%+v` or `%+s` -Output in detail -``` go -r, _ := req.Post(url, header, param) -log.Printf("%+v", r) // output the same format as Debug is enabled -``` +The following is a series of video tutorials for req: -### `%v` or `%s` -Output in simple way (default format) -``` go -r, _ := req.Get(url, param) -log.Printf("%v\n", r) // GET http://foo.bar/api?name=roc&cmd=add {"code":"0","msg":"success"} -log.Prinln(r) // smae as above -``` +* [Youtube Play List](https://www.youtube.com/watch?v=Dy8iph8JWw0&list=PLnW6i9cc0XqlhUgOJJp5Yf1FHXlANYMhF&index=2) +* [BiliBili 鎾斁鍒楄〃](https://www.bilibili.com/video/BV14t4y1J7cm) (Chinese) + +**More** -### `%-v` or `%-s` -Output in simple way and keep all in one line (request body or response body may have multiple lines, this format will replace `"\r"` or `"\n"` with `" "`, it's useful when doing some search in your log file) - -### Flag -You can call `SetFlags` to control the output content, decide which pieces can be output. -``` go -const ( - LreqHead = 1 << iota // output request head (request line and request header) - LreqBody // output request body - LrespHead // output response head (response line and response header) - LrespBody // output response body - Lcost // output time costed by the request - LstdFlags = LreqHead | LreqBody | LrespHead | LrespBody +Check more introduction, tutorials, examples, best practices and API references on the [official website](https://req.cool/). + +## Simple GET + +```go +package main + +import ( + "fmt" + "github.com/imroc/req/v3" + "log" ) -``` -``` go -req.SetFlags(req.LreqHead | req.LreqBody | req.LrespHead) -``` -### Monitoring time consuming -``` go -req.SetFlags(req.LstdFlags | req.Lcost) // output format add time costed by request -r,_ := req.Get(url) -log.Println(r) // http://foo.bar/api 3.260802ms {"code":0 "msg":"success"} -if r.Cost() > 3 * time.Second { // check cost - log.Println("WARN: slow request:", r) +func main() { + client := req.C() // Use C() to create a client. + resp, err := client.R(). // Use R() to create a request. + Get("https://httpbin.org/uuid") + if err != nil { + log.Fatal(err) + } + fmt.Println(resp) } ``` -## ToJSON & ToXML -``` go -r, _ := req.Get(url) -r.ToJSON(&foo) -r, _ = req.Post(url, req.BodyXML(&bar)) -r.ToXML(&baz) +```txt +{ + "uuid": "a4d4430d-0e5f-412f-88f5-722d84bc2a62" +} ``` -## Get *http.Response +## Advanced GET + ```go -// func (r *Req) Response() *http.Response -r, _ := req.Get(url) -resp := r.Response() -fmt.Println(resp.StatusCode) -``` +package main + +import ( + "fmt" + "github.com/imroc/req/v3" + "log" + "time" +) + +type ErrorMessage struct { + Message string `json:"message"` +} + +type UserInfo struct { + Name string `json:"name"` + Blog string `json:"blog"` +} -## Upload -Use `req.File` to match files -``` go -req.Post(url, req.File("imroc.png"), req.File("/Users/roc/Pictures/*.png")) +func main() { + client := req.C(). + SetUserAgent("my-custom-client"). // Chainable client settings. + SetTimeout(5 * time.Second) + + var userInfo UserInfo + var errMsg ErrorMessage + resp, err := client.R(). + SetHeader("Accept", "application/vnd.github.v3+json"). // Chainable request settings. + SetPathParam("username", "imroc"). // Replace path variable in url. + SetSuccessResult(&userInfo). // Unmarshal response body into userInfo automatically if status code is between 200 and 299. + SetErrorResult(&errMsg). // Unmarshal response body into errMsg automatically if status code >= 400. + EnableDump(). 
// Enable dump at request level, only print dump content if there is an error or some unknown situation occurs to help troubleshoot. + Get("https://api.github.com/users/{username}") + + if err != nil { // Error handling. + log.Println("error:", err) + log.Println("raw content:") + log.Println(resp.Dump()) // Record raw content when error occurs. + return + } + + if resp.IsErrorState() { // Status code >= 400. + fmt.Println(errMsg.Message) // Record error message returned. + return + } + + if resp.IsSuccessState() { // Status code is between 200 and 299. + fmt.Printf("%s (%s)\n", userInfo.Name, userInfo.Blog) + return + } + + // Unknown status code. + log.Println("unknown status", resp.Status) + log.Println("raw content:") + log.Println(resp.Dump()) // Record raw content when server returned unknown status code. +} ``` -Use `req.FileUpload` to fully control -``` go -file, _ := os.Open("imroc.png") -req.Post(url, req.FileUpload{ - File: file, - FieldName: "file", // FieldName is form field name - FileName: "avatar.png", //Filename is the name of the file that you wish to upload. We use this to guess the mimetype as well as pass it onto the server -}) + +Normally it will output (SuccessState): + +```txt +roc (https://imroc.cc) ``` -Use `req.UploadProgress` to listen upload progress + +## More Advanced GET + +You can set up a unified logic for error handling on the client, so that each time you send a request you only need to focus on the success situation, reducing duplicate code. + ```go -progress := func(current, total int64) { - fmt.Println(float32(current)/float32(total)*100, "%") +package main + +import ( + "fmt" + "github.com/imroc/req/v3" + "log" + "time" +) + +type ErrorMessage struct { + Message string `json:"message"` +} + +func (msg *ErrorMessage) Error() string { + return fmt.Sprintf("API Error: %s", msg.Message) } -req.Post(url, req.File("/Users/roc/Pictures/*.png"), req.UploadProgress(progress)) -fmt.Println("upload complete") -``` -## Download -``` go -r, _ := req.Get(url) -r.ToFile("imroc.png") +type UserInfo struct { + Name string `json:"name"` + Blog string `json:"blog"` +} + +var client = req.C(). + SetUserAgent("my-custom-client"). // Chainable client settings. + SetTimeout(5 * time.Second). + EnableDumpEachRequest(). + SetCommonErrorResult(&ErrorMessage{}). + OnAfterResponse(func(client *req.Client, resp *req.Response) error { + if resp.Err != nil { // There is an underlying error, e.g. network error or unmarshal error. + return nil + } + if errMsg, ok := resp.ErrorResult().(*ErrorMessage); ok { + resp.Err = errMsg // Convert api error into go error + return nil + } + if !resp.IsSuccessState() { + // Neither a success response nor a error response, record details to help troubleshooting + resp.Err = fmt.Errorf("bad status: %s\nraw content:\n%s", resp.Status, resp.Dump()) + } + return nil + }) + +func main() { + var userInfo UserInfo + resp, err := client.R(). + SetHeader("Accept", "application/vnd.github.v3+json"). // Chainable request settings + SetPathParam("username", "imroc"). + SetSuccessResult(&userInfo). // Unmarshal response body into userInfo automatically if status code is between 200 and 299. + Get("https://api.github.com/users/{username}") + + if err != nil { // Error handling. + log.Println("error:", err) + return + } + + if resp.IsSuccessState() { // Status code is between 200 and 299. 
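+		// The OnAfterResponse hook above converts error responses into go errors, so the
+		// err check earlier already covers them; only the success case needs handling here.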
+ fmt.Printf("%s (%s)\n", userInfo.Name, userInfo.Blog) + } +} ``` -Use `req.DownloadProgress` to listen download progress + +## Simple POST + ```go -progress := func(current, total int64) { - fmt.Println(float32(current)/float32(total)*100, "%") +package main + +import ( + "fmt" + "github.com/imroc/req/v3" + "log" +) + +type Repo struct { + Name string `json:"name"` + Url string `json:"url"` } -r, _ := req.Get(url, req.DownloadProgress(progress)) -r.ToFile("hello.mp4") -fmt.Println("download complete") -``` -## Cookie -By default, the underlying `*http.Client` will manage your cookie(send cookie header to server automatically if server has set a cookie for you), you can disable it by calling this function : -``` go -req.EnableCookie(false) -``` -and you can set cookie in request just using `*http.Cookie` -``` go -cookie := new(http.Cookie) -// ...... -req.Get(url, cookie) +type Result struct { + Data string `json:"data"` +} + +func main() { + client := req.C().DevMode() + var result Result + + resp, err := client.R(). + SetBody(&Repo{Name: "req", Url: "https://github.com/imroc/req"}). + SetSuccessResult(&result). + Post("https://httpbin.org/post") + if err != nil { + log.Fatal(err) + } + + if !resp.IsSuccessState() { + fmt.Println("bad response status:", resp.Status) + return + } + fmt.Println("++++++++++++++++++++++++++++++++++++++++++++++++") + fmt.Println("data:", result.Data) + fmt.Println("++++++++++++++++++++++++++++++++++++++++++++++++") +} ``` -## Set Timeout -``` go -req.SetTimeout(50 * time.Second) +```txt +2022/05/19 20:11:00.151171 DEBUG [req] HTTP/2 POST https://httpbin.org/post +:authority: httpbin.org +:method: POST +:path: /post +:scheme: https +user-agent: req/v3 (https://github.com/imroc/req/v3) +content-type: application/json; charset=utf-8 +content-length: 55 +accept-encoding: gzip + +{"name":"req","website":"https://github.com/imroc/req"} + +:status: 200 +date: Thu, 19 May 2022 12:11:00 GMT +content-type: application/json +content-length: 651 +server: gunicorn/19.9.0 +access-control-allow-origin: * +access-control-allow-credentials: true + +{ + "args": {}, + "data": "{\"name\":\"req\",\"website\":\"https://github.com/imroc/req\"}", + "files": {}, + "form": {}, + "headers": { + "Accept-Encoding": "gzip", + "Content-Length": "55", + "Content-Type": "application/json; charset=utf-8", + "Host": "httpbin.org", + "User-Agent": "req/v3 (https://github.com/imroc/req/v3)", + "X-Amzn-Trace-Id": "Root=1-628633d4-7559d633152b4307288ead2e" + }, + "json": { + "name": "req", + "website": "https://github.com/imroc/req" + }, + "origin": "103.7.29.30", + "url": "https://httpbin.org/post" +} + +++++++++++++++++++++++++++++++++++++++++++++++++ +data: {"name":"req","url":"https://github.com/imroc/req"} +++++++++++++++++++++++++++++++++++++++++++++++++ ``` -## Set Proxy -By default, req use proxy from system environment if `http_proxy` or `https_proxy` is specified, you can set a custom proxy or disable it by set `nil` -``` go -req.SetProxy(func(r *http.Request) (*url.URL, error) { - if strings.Contains(r.URL.Hostname(), "google") { - return url.Parse("http://my.vpn.com:23456") +## Do API Style + +If you like, you can also use a Do API style like the following to make requests: + +```go +package main + +import ( + "fmt" + "github.com/imroc/req/v3" +) + +type APIResponse struct { + Origin string `json:"origin"` + Url string `json:"url"` +} + +func main() { + var resp APIResponse + c := req.C().SetBaseURL("https://httpbin.org/post") + err := c.Post(). + SetBody("hello"). + Do(). 
+ Into(&resp) + if err != nil { + panic(err) } - return nil, nil -}) -``` -Set a simple proxy (use fixed proxy url for every request) -``` go -req.SetProxyUrl("http://my.proxy.com:23456") + fmt.Println("My IP is", resp.Origin) +} ``` -## Customize Client -Use `SetClient` to change the default underlying `*http.Client` -``` go -req.SetClient(client) +```txt +My IP is 182.138.155.113 ``` -Specify independent http client for some requests -``` go -client := &http.Client{Timeout: 30 * time.Second} -req.Get(url, client) -``` -Change some properties of default client you want -``` go -req.Client().Jar, _ = cookiejar.New(nil) -trans, _ := req.Client().Transport.(*http.Transport) -trans.MaxIdleConns = 20 -trans.TLSHandshakeTimeout = 20 * time.Second -trans.DisableKeepAlives = true -trans.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + +* The order of chain calls is more intuitive: first call Client to create a request with a specified Method, then use chain calls to set the request, then use `Do()` to fire the request, return Response, and finally call `Response.Into` to unmarshal response body into specified object. +* `Response.Into` will return an error if an error occurs during sending the request or during unmarshalling. +* The url of some APIs is fixed, and different types of requests are implemented by passing different bodies. In this scenario, `Client.SetBaseURL` can be used to set a unified url, and there is no need to set the url for each request when initiating a request. Of course, you can also call `Request.SetURL` to set it if you need it. + +## Build SDK With Req + +Here is an example of building GitHub's SDK with req, using two styles (`GetUserProfile_Style1`, `GetUserProfile_Style2`). + +```go +import ( + "context" + "fmt" + "github.com/imroc/req/v3" +) + +type ErrorMessage struct { + Message string `json:"message"` +} + +// Error implements go error interface. +func (msg *ErrorMessage) Error() string { + return fmt.Sprintf("API Error: %s", msg.Message) +} + +type GithubClient struct { + *req.Client +} + +func NewGithubClient() *GithubClient { + return &GithubClient{ + Client: req.C(). + SetBaseURL("https://api.github.com"). + SetCommonErrorResult(&ErrorMessage{}). + EnableDumpEachRequest(). + OnAfterResponse(func(client *req.Client, resp *req.Response) error { + if resp.Err != nil { // There is an underlying error, e.g. network error or unmarshal error. + return nil + } + if errMsg, ok := resp.ErrorResult().(*ErrorMessage); ok { + resp.Err = errMsg // Convert api error into go error + return nil + } + if !resp.IsSuccessState() { + // Neither a success response nor a error response, record details to help troubleshooting + resp.Err = fmt.Errorf("bad status: %s\nraw content:\n%s", resp.Status, resp.Dump()) + } + return nil + }), + } +} + +type UserProfile struct { + Name string `json:"name"` + Blog string `json:"blog"` +} + +// GetUserProfile_Style1 returns the user profile for the specified user. +// Github API doc: https://docs.github.com/en/rest/users/users#get-a-user +func (c *GithubClient) GetUserProfile_Style1(ctx context.Context, username string) (user *UserProfile, err error) { + _, err = c.R(). + SetContext(ctx). + SetPathParam("username", username). + SetSuccessResult(&user). + Get("/users/{username}") + return +} + +// GetUserProfile_Style2 returns the user profile for the specified user. 
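+// It is equivalent to GetUserProfile_Style1, but written in the Do API style: the request is
+// created from the client with the method and path, configured with chained settings, fired
+// with Do, and its body unmarshalled into user with Into.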
+// Github API doc: https://docs.github.com/en/rest/users/users#get-a-user +func (c *GithubClient) GetUserProfile_Style2(ctx context.Context, username string) (user *UserProfile, err error) { + err = c.Get("/users/{username}"). + SetPathParam("username", username). + Do(ctx). + Into(&user) + return +} ``` + +## Contributing + +If you have a bug report or feature request, you can [open an issue](https://github.com/imroc/req/issues/new), and [pull requests](https://github.com/imroc/req/pulls) are also welcome. + +## Contact + +If you have questions, feel free to reach out to us in the following ways: + +* [Github Discussion](https://github.com/imroc/req/discussions) +* [Slack](https://imroc-req.slack.com/archives/C03UFPGSNC8) | [Join](https://slack.req.cool/) + +## Sponsors + +If you like req and it really helps you, feel free to reward me with a cup of coffee, and don't forget to mention your github id. + + + + + + +
+Wechat | Alipay
+
+Many thanks to the following sponsors:
+
+* M-Cosmosss
+* aadog
+ +## License + +`Req` released under MIT license, refer [LICENSE](LICENSE) file. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..4a5665da --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +## Supported Versions + +req version >= `v3.43.x` + +## Reporting a Vulnerability + +Email: roc@imroc.cc diff --git a/client.go b/client.go new file mode 100644 index 00000000..b1eea7d9 --- /dev/null +++ b/client.go @@ -0,0 +1,1752 @@ +package req + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/xml" + "errors" + "io" + "net" + "net/http" + "net/http/cookiejar" + urlpkg "net/url" + "os" + "reflect" + "strings" + "time" + + utls "github.com/refraction-networking/utls" + "golang.org/x/net/publicsuffix" + + "github.com/imroc/req/v3/http2" + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/util" +) + +// DefaultClient returns the global default Client. +func DefaultClient() *Client { + return defaultClient +} + +// SetDefaultClient override the global default Client. +func SetDefaultClient(c *Client) { + if c != nil { + defaultClient = c + } +} + +var defaultClient = C() + +// Client is the req's http client. +type Client struct { + BaseURL string + PathParams map[string]string + QueryParams urlpkg.Values + FormData urlpkg.Values + DebugLog bool + AllowGetMethodPayload bool + *Transport + + cookiejarFactory func() *cookiejar.Jar + trace bool + disableAutoReadResponse bool + commonErrorType reflect.Type + retryOption *retryOption + jsonMarshal func(v interface{}) ([]byte, error) + jsonUnmarshal func(data []byte, v interface{}) error + xmlMarshal func(v interface{}) ([]byte, error) + xmlUnmarshal func(data []byte, v interface{}) error + multipartBoundaryFunc func() string + outputDirectory string + scheme string + log Logger + dumpOptions *DumpOptions + httpClient *http.Client + beforeRequest []RequestMiddleware + udBeforeRequest []RequestMiddleware + afterResponse []ResponseMiddleware + wrappedRoundTrip RoundTripper + roundTripWrappers []RoundTripWrapper + responseBodyTransformer func(rawBody []byte, req *Request, resp *Response) (transformedBody []byte, err error) + resultStateCheckFunc func(resp *Response) ResultState + onError ErrorHook +} + +type ErrorHook func(client *Client, req *Request, resp *Response, err error) + +// R create a new request. +func (c *Client) R() *Request { + return &Request{ + client: c, + retryOption: c.retryOption.Clone(), + } +} + +// Get create a new GET request, accepts 0 or 1 url. +func (c *Client) Get(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodGet + return r +} + +// Post create a new POST request. +func (c *Client) Post(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodPost + return r +} + +// Patch create a new PATCH request. +func (c *Client) Patch(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodPatch + return r +} + +// Delete create a new DELETE request. +func (c *Client) Delete(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodDelete + return r +} + +// Put create a new PUT request. +func (c *Client) Put(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodPut + return r +} + +// Head create a new HEAD request. 
+func (c *Client) Head(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodHead + return r +} + +// Options create a new OPTIONS request. +func (c *Client) Options(url ...string) *Request { + r := c.R() + if len(url) > 0 { + r.RawURL = url[0] + } + r.Method = http.MethodOptions + return r +} + +// GetTransport return the underlying transport. +func (c *Client) GetTransport() *Transport { + return c.Transport +} + +// SetResponseBodyTransformer set the response body transformer, which can modify the +// response body before unmarshalled if auto-read response body is not disabled. +func (c *Client) SetResponseBodyTransformer(fn func(rawBody []byte, req *Request, resp *Response) (transformedBody []byte, err error)) *Client { + c.responseBodyTransformer = fn + return c +} + +// SetCommonError set the common result that response body will be unmarshalled to +// if no error occurs but Response.ResultState returns ErrorState, by default it +// is HTTP status `code >= 400`, you can also use SetCommonResultStateChecker +// to customize the result state check logic. +// +// Deprecated: Use SetCommonErrorResult instead. +func (c *Client) SetCommonError(err interface{}) *Client { + return c.SetCommonErrorResult(err) +} + +// SetCommonErrorResult set the common result that response body will be unmarshalled to +// if no error occurs but Response.ResultState returns ErrorState, by default it +// is HTTP status `code >= 400`, you can also use SetCommonResultStateChecker +// to customize the result state check logic. +func (c *Client) SetCommonErrorResult(err interface{}) *Client { + if err != nil { + c.commonErrorType = util.GetType(err) + } + return c +} + +// ResultState represents the state of the result. +type ResultState int + +const ( + // SuccessState indicates the response is in success state, + // and result will be unmarshalled if Request.SetSuccessResult + // is called. + SuccessState ResultState = iota + // ErrorState indicates the response is in error state, + // and result will be unmarshalled if Request.SetErrorResult + // or Client.SetCommonErrorResult is called. + ErrorState + // UnknownState indicates the response is in unknown state, + // and handler will be invoked if Request.SetUnknownResultHandlerFunc + // or Client.SetCommonUnknownResultHandlerFunc is called. + UnknownState +) + +// SetResultStateCheckFunc overrides the default result state checker with customized one, +// which returns SuccessState when HTTP status `code >= 200 and <= 299`, and returns +// ErrorState when HTTP status `code >= 400`, otherwise returns UnknownState. +func (c *Client) SetResultStateCheckFunc(fn func(resp *Response) ResultState) *Client { + c.resultStateCheckFunc = fn + return c +} + +// SetCommonFormDataFromValues set the form data from url.Values for requests +// fired from the client which request method allows payload. +func (c *Client) SetCommonFormDataFromValues(data urlpkg.Values) *Client { + if c.FormData == nil { + c.FormData = urlpkg.Values{} + } + for k, v := range data { + for _, kv := range v { + c.FormData.Add(k, kv) + } + } + return c +} + +// SetCommonFormData set the form data from map for requests fired from the client +// which request method allows payload. 
+func (c *Client) SetCommonFormData(data map[string]string) *Client { + if c.FormData == nil { + c.FormData = urlpkg.Values{} + } + for k, v := range data { + c.FormData.Set(k, v) + } + return c +} + +// SetMultipartBoundaryFunc overrides the default function used to generate +// boundary delimiters for "multipart/form-data" requests with a customized one, +// which returns a boundary delimiter (without the two leading hyphens). +// +// Boundary delimiter may only contain certain ASCII characters, and must be +// non-empty and at most 70 bytes long (see RFC 2046, Section 5.1.1). +func (c *Client) SetMultipartBoundaryFunc(fn func() string) *Client { + c.multipartBoundaryFunc = fn + return c +} + +// SetBaseURL set the default base URL, will be used if request URL is +// a relative URL. +func (c *Client) SetBaseURL(u string) *Client { + c.BaseURL = strings.TrimRight(u, "/") + return c +} + +// SetOutputDirectory set output directory that response will +// be downloaded to. +func (c *Client) SetOutputDirectory(dir string) *Client { + c.outputDirectory = dir + return c +} + +// SetCertFromFile helps to set client certificates from cert and key file. +func (c *Client) SetCertFromFile(certFile, keyFile string) *Client { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + c.log.Errorf("failed to load client cert: %v", err) + return c + } + config := c.GetTLSClientConfig() + config.Certificates = append(config.Certificates, cert) + return c +} + +// SetCerts set client certificates. +func (c *Client) SetCerts(certs ...tls.Certificate) *Client { + config := c.GetTLSClientConfig() + config.Certificates = append(config.Certificates, certs...) + return c +} + +func (c *Client) appendRootCertData(data []byte) { + config := c.GetTLSClientConfig() + if config.RootCAs == nil { + config.RootCAs = x509.NewCertPool() + } + config.RootCAs.AppendCertsFromPEM(data) +} + +// SetRootCertFromString set root certificates from string. +func (c *Client) SetRootCertFromString(pemContent string) *Client { + c.appendRootCertData([]byte(pemContent)) + return c +} + +// SetRootCertsFromFile set root certificates from files. +func (c *Client) SetRootCertsFromFile(pemFiles ...string) *Client { + for _, pemFile := range pemFiles { + rootPemData, err := os.ReadFile(pemFile) + if err != nil { + c.log.Errorf("failed to read root cert file: %v", err) + return c + } + c.appendRootCertData(rootPemData) + } + return c +} + +// GetTLSClientConfig return the underlying tls.Config. +func (c *Client) GetTLSClientConfig() *tls.Config { + if c.TLSClientConfig == nil { + c.TLSClientConfig = &tls.Config{ + NextProtos: []string{"h2", "http/1.1"}, + } + } + return c.TLSClientConfig +} + +// SetRedirectPolicy set the RedirectPolicy which controls the behavior of receiving redirect +// responses (usually responses with 301 and 302 status code), see the predefined +// AllowedDomainRedirectPolicy, AllowedHostRedirectPolicy, DefaultRedirectPolicy, MaxRedirectPolicy, +// NoRedirectPolicy, SameDomainRedirectPolicy and SameHostRedirectPolicy. 
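+//
+// For example, a sketch combining two of the predefined policies (assuming
+// MaxRedirectPolicy takes the maximum number of allowed redirects):
+//
+//	client.SetRedirectPolicy(
+//		MaxRedirectPolicy(5),
+//		SameDomainRedirectPolicy(),
+//	)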
+func (c *Client) SetRedirectPolicy(policies ...RedirectPolicy) *Client { + if len(policies) == 0 { + return c + } + c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + for _, f := range policies { + if f == nil { + continue + } + err := f(req, via) + if err != nil { + return err + } + } + if c.DebugLog { + c.log.Debugf(" %s %s", req.Method, req.URL.String()) + } + return nil + } + return c +} + +// DisableKeepAlives disable the HTTP keep-alives (enabled by default) +// and will only use the connection to the server for a single +// HTTP request. +// +// This is unrelated to the similarly named TCP keep-alives. +func (c *Client) DisableKeepAlives() *Client { + c.Transport.DisableKeepAlives = true + return c +} + +// EnableKeepAlives enables HTTP keep-alives (enabled by default). +func (c *Client) EnableKeepAlives() *Client { + c.Transport.DisableKeepAlives = false + return c +} + +// DisableCompression disables the compression (enabled by default), +// which prevents the Transport from requesting compression +// with an "Accept-Encoding: gzip" request header when the +// Request contains no existing Accept-Encoding value. If +// the Transport requests gzip on its own and gets a gzipped +// response, it's transparently decoded in the Response.Body. +// However, if the user explicitly requested gzip it is not +// automatically uncompressed. +func (c *Client) DisableCompression() *Client { + c.Transport.DisableCompression = true + return c +} + +// EnableCompression enables the compression (enabled by default). +func (c *Client) EnableCompression() *Client { + c.Transport.DisableCompression = false + return c +} + +// EnableAutoDecompress enables the automatic decompression (disabled by default). +func (c *Client) EnableAutoDecompress() *Client { + c.Transport.AutoDecompression = true + return c +} + +// DisableAutoDecompress disables the automatic decompression (disabled by default). +func (c *Client) DisableAutoDecompress() *Client { + c.Transport.AutoDecompression = false + return c +} + +// SetTLSClientConfig set the TLS client config. Be careful! Usually +// you don't need this, you can directly set the tls configuration with +// methods like EnableInsecureSkipVerify, SetCerts etc. Or you can call +// GetTLSClientConfig to get the current tls configuration to avoid +// overwriting some important configurations, such as not setting NextProtos +// will not use http2 by default. +func (c *Client) SetTLSClientConfig(conf *tls.Config) *Client { + c.TLSClientConfig = conf + return c +} + +// EnableInsecureSkipVerify enable send https without verifing +// the server's certificates (disabled by default). +func (c *Client) EnableInsecureSkipVerify() *Client { + c.GetTLSClientConfig().InsecureSkipVerify = true + return c +} + +// DisableInsecureSkipVerify disable send https without verifing +// the server's certificates (disabled by default). +func (c *Client) DisableInsecureSkipVerify() *Client { + c.GetTLSClientConfig().InsecureSkipVerify = false + return c +} + +// SetCommonQueryParams set URL query parameters with a map +// for requests fired from the client. +func (c *Client) SetCommonQueryParams(params map[string]string) *Client { + for k, v := range params { + c.SetCommonQueryParam(k, v) + } + return c +} + +// AddCommonQueryParam add a URL query parameter with a key-value +// pair for requests fired from the client. 
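+//
+// For example, adding two values for the same key:
+//
+//	client.AddCommonQueryParam("key", "value1").
+//		AddCommonQueryParam("key", "value2")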
+func (c *Client) AddCommonQueryParam(key, value string) *Client { + if c.QueryParams == nil { + c.QueryParams = make(urlpkg.Values) + } + c.QueryParams.Add(key, value) + return c +} + +// AddCommonQueryParams add one or more values of specified URL query parameter +// for requests fired from the client. +func (c *Client) AddCommonQueryParams(key string, values ...string) *Client { + if c.QueryParams == nil { + c.QueryParams = make(urlpkg.Values) + } + vs := c.QueryParams[key] + vs = append(vs, values...) + c.QueryParams[key] = vs + return c +} + +func (c *Client) pathParams() map[string]string { + if c.PathParams == nil { + c.PathParams = make(map[string]string) + } + return c.PathParams +} + +// SetCommonPathParam set a path parameter for requests fired from the client. +func (c *Client) SetCommonPathParam(key, value string) *Client { + c.pathParams()[key] = value + return c +} + +// SetCommonPathParams set path parameters for requests fired from the client. +func (c *Client) SetCommonPathParams(pathParams map[string]string) *Client { + m := c.pathParams() + for k, v := range pathParams { + m[k] = v + } + return c +} + +// SetCommonQueryParam set a URL query parameter with a key-value +// pair for requests fired from the client. +func (c *Client) SetCommonQueryParam(key, value string) *Client { + if c.QueryParams == nil { + c.QueryParams = make(urlpkg.Values) + } + c.QueryParams.Set(key, value) + return c +} + +// SetCommonQueryString set URL query parameters with a raw query string +// for requests fired from the client. +func (c *Client) SetCommonQueryString(query string) *Client { + params, err := urlpkg.ParseQuery(strings.TrimSpace(query)) + if err != nil { + c.log.Warnf("failed to parse query string (%s): %v", query, err) + return c + } + if c.QueryParams == nil { + c.QueryParams = make(urlpkg.Values) + } + for p, v := range params { + for _, pv := range v { + c.QueryParams.Add(p, pv) + } + } + return c +} + +// SetCommonCookies set HTTP cookies for requests fired from the client. +func (c *Client) SetCommonCookies(cookies ...*http.Cookie) *Client { + c.Cookies = append(c.Cookies, cookies...) + return c +} + +// DisableDebugLog disable debug level log (disabled by default). +func (c *Client) DisableDebugLog() *Client { + c.DebugLog = false + return c +} + +// EnableDebugLog enable debug level log (disabled by default). +func (c *Client) EnableDebugLog() *Client { + c.DebugLog = true + return c +} + +// DevMode enables: +// 1. Dump content of all requests and responses to see details. +// 2. Output debug level log for deeper insights. +// 3. Trace all requests, so you can get trace info to analyze performance. +func (c *Client) DevMode() *Client { + return c.EnableDumpAll(). + EnableDebugLog(). + EnableTraceAll() +} + +// SetScheme set the default scheme for client, will be used when +// there is no scheme in the request URL (e.g. "github.com/imroc/req"). +func (c *Client) SetScheme(scheme string) *Client { + if !util.IsStringEmpty(scheme) { + c.scheme = strings.TrimSpace(scheme) + } + return c +} + +// GetLogger return the internal logger, usually used in middleware. +func (c *Client) GetLogger() Logger { + if c.log != nil { + return c.log + } + c.log = createDefaultLogger() + return c.log +} + +// SetLogger set the customized logger for client, will disable log if set to nil. +func (c *Client) SetLogger(log Logger) *Client { + if log == nil { + c.log = &disableLogger{} + return c + } + c.log = log + return c +} + +// SetTimeout set timeout for requests fired from the client. 
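+//
+// For example:
+//
+//	client.SetTimeout(30 * time.Second)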
+func (c *Client) SetTimeout(d time.Duration) *Client { + c.httpClient.Timeout = d + return c +} + +func (c *Client) getDumpOptions() *DumpOptions { + if c.dumpOptions == nil { + c.dumpOptions = newDefaultDumpOptions() + } + return c.dumpOptions +} + +// EnableDumpAll enable dump for requests fired from the client, including +// all content for the request and response by default. +func (c *Client) EnableDumpAll() *Client { + if c.Dump != nil { // dump already started + return c + } + c.EnableDump(c.getDumpOptions()) + return c +} + +// EnableDumpAllToFile enable dump for requests fired from the +// client and output to the specified file. +func (c *Client) EnableDumpAllToFile(filename string) *Client { + file, err := os.Create(filename) + if err != nil { + c.log.Errorf("create dump file error: %v", err) + return c + } + c.getDumpOptions().Output = file + c.EnableDumpAll() + return c +} + +// EnableDumpAllTo enable dump for requests fired from the +// client and output to the specified io.Writer. +func (c *Client) EnableDumpAllTo(output io.Writer) *Client { + c.getDumpOptions().Output = output + c.EnableDumpAll() + return c +} + +// EnableDumpAllAsync enable dump for requests fired from the +// client and output asynchronously, can be used for debugging +// in production environment without affecting performance. +func (c *Client) EnableDumpAllAsync() *Client { + o := c.getDumpOptions() + o.Async = true + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutRequestBody enable dump for requests fired +// from the client without request body, can be used in the upload +// request to avoid dumping the unreadable binary content. +func (c *Client) EnableDumpAllWithoutRequestBody() *Client { + o := c.getDumpOptions() + o.RequestBody = false + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutResponseBody enable dump for requests fired +// from the client without response body, can be used in the download +// request to avoid dumping the unreadable binary content. +func (c *Client) EnableDumpAllWithoutResponseBody() *Client { + o := c.getDumpOptions() + o.ResponseBody = false + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutResponse enable dump for requests fired from +// the client without response, can be used if you only care about +// the request. +func (c *Client) EnableDumpAllWithoutResponse() *Client { + o := c.getDumpOptions() + o.ResponseBody = false + o.ResponseHeader = false + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutRequest enables dump for requests fired from +// the client without request, can be used if you only care about +// the response. +func (c *Client) EnableDumpAllWithoutRequest() *Client { + o := c.getDumpOptions() + o.RequestHeader = false + o.RequestBody = false + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutHeader enable dump for requests fired from +// the client without header, can be used if you only care about +// the body. +func (c *Client) EnableDumpAllWithoutHeader() *Client { + o := c.getDumpOptions() + o.RequestHeader = false + o.ResponseHeader = false + c.EnableDumpAll() + return c +} + +// EnableDumpAllWithoutBody enable dump for requests fired from +// the client without body, can be used if you only care about +// the header. 
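+//
+// For example:
+//
+//	client.EnableDumpAllWithoutBody().R().Get(url)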
+func (c *Client) EnableDumpAllWithoutBody() *Client { + o := c.getDumpOptions() + o.RequestBody = false + o.ResponseBody = false + c.EnableDumpAll() + return c +} + +// EnableDumpEachRequest enable dump at the request-level for each request, and only +// temporarily stores the dump content in memory, call Response.Dump() to get the +// dump content when needed. +func (c *Client) EnableDumpEachRequest() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDump() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutBody enable dump without body at the request-level for +// each request, and only temporarily stores the dump content in memory, call +// Response.Dump() to get the dump content when needed. +func (c *Client) EnableDumpEachRequestWithoutBody() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutBody() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutHeader enable dump without header at the request-level for +// each request, and only temporarily stores the dump content in memory, call +// Response.Dump() to get the dump content when needed. +func (c *Client) EnableDumpEachRequestWithoutHeader() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutHeader() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutRequest enable dump without request at the request-level for +// each request, and only temporarily stores the dump content in memory, call +// Response.Dump() to get the dump content when needed. +func (c *Client) EnableDumpEachRequestWithoutRequest() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutRequest() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutResponse enable dump without response at the request-level for +// each request, and only temporarily stores the dump content in memory, call +// Response.Dump() to get the dump content when needed. +func (c *Client) EnableDumpEachRequestWithoutResponse() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutResponse() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutResponseBody enable dump without response body at the +// request-level for each request, and only temporarily stores the dump content in memory, +// call Response.Dump() to get the dump content when needed. +func (c *Client) EnableDumpEachRequestWithoutResponseBody() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutResponseBody() + } + return nil + }) +} + +// EnableDumpEachRequestWithoutRequestBody enable dump without request body at the +// request-level for each request, and only temporarily stores the dump content in memory, +// call Response.Dump() to get the dump content when needed. 
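+//
+// For example (a sketch; url is a placeholder):
+//
+//	client.EnableDumpEachRequestWithoutRequestBody()
+//	resp, err := client.R().Get(url)
+//	if err == nil {
+//		fmt.Println(resp.Dump()) // dump content kept in memory for this request
+//	}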
+func (c *Client) EnableDumpEachRequestWithoutRequestBody() *Client { + return c.OnBeforeRequest(func(client *Client, req *Request) error { + if req.RetryAttempt == 0 { // Ignore on retry, no need to repeat enable dump. + req.EnableDumpWithoutRequestBody() + } + return nil + }) +} + +// NewRequest is the alias of R() +func (c *Client) NewRequest() *Request { + return c.R() +} + +func (c *Client) NewParallelDownload(url string) *ParallelDownload { + return &ParallelDownload{ + url: url, + client: c, + } +} + +// DisableAutoReadResponse disable read response body automatically (enabled by default). +func (c *Client) DisableAutoReadResponse() *Client { + c.disableAutoReadResponse = true + return c +} + +// EnableAutoReadResponse enable read response body automatically (enabled by default). +func (c *Client) EnableAutoReadResponse() *Client { + c.disableAutoReadResponse = false + return c +} + +// SetAutoDecodeContentType set the content types that will be auto-detected and decode to utf-8 +// (e.g. "json", "xml", "html", "text"). +func (c *Client) SetAutoDecodeContentType(contentTypes ...string) *Client { + c.Transport.SetAutoDecodeContentType(contentTypes...) + return c +} + +// SetAutoDecodeContentTypeFunc set the function that determines whether the specified `Content-Type` should be auto-detected and decode to utf-8. +func (c *Client) SetAutoDecodeContentTypeFunc(fn func(contentType string) bool) *Client { + c.Transport.SetAutoDecodeContentTypeFunc(fn) + return c +} + +// SetAutoDecodeAllContentType enable try auto-detect charset and decode all content type to utf-8. +func (c *Client) SetAutoDecodeAllContentType() *Client { + c.Transport.SetAutoDecodeAllContentType() + return c +} + +// DisableAutoDecode disable auto-detect charset and decode to utf-8 (enabled by default). +func (c *Client) DisableAutoDecode() *Client { + c.Transport.DisableAutoDecode() + return c +} + +// EnableAutoDecode enable auto-detect charset and decode to utf-8 (enabled by default). +func (c *Client) EnableAutoDecode() *Client { + c.Transport.EnableAutoDecode() + return c +} + +// SetUserAgent set the "User-Agent" header for requests fired from the client. +func (c *Client) SetUserAgent(userAgent string) *Client { + return c.SetCommonHeader(header.UserAgent, userAgent) +} + +// SetCommonBearerAuthToken set the bearer auth token for requests fired from the client. +func (c *Client) SetCommonBearerAuthToken(token string) *Client { + return c.SetCommonHeader(header.Authorization, "Bearer "+token) +} + +// SetCommonBasicAuth set the basic auth for requests fired from +// the client. +func (c *Client) SetCommonBasicAuth(username, password string) *Client { + c.SetCommonHeader(header.Authorization, util.BasicAuthHeaderValue(username, password)) + return c +} + +// SetCommonDigestAuth sets the Digest Access auth scheme for requests fired from the client. If a server responds with +// 401 and sends a Digest challenge in the WWW-Authenticate Header, requests will be resent with the appropriate +// Authorization Header. 
+// +// For Example: To set the Digest scheme with user "roc" and password "123456" +// +// client.SetCommonDigestAuth("roc", "123456") +// +// Information about Digest Access Authentication can be found in RFC7616: +// +// https://datatracker.ietf.org/doc/html/rfc7616 +// +// See `Request.SetDigestAuth` +func (c *Client) SetCommonDigestAuth(username, password string) *Client { + c.OnAfterResponse(handleDigestAuthFunc(username, password)) + return c +} + +// SetCommonHeaders set headers for requests fired from the client. +func (c *Client) SetCommonHeaders(hdrs map[string]string) *Client { + for k, v := range hdrs { + c.SetCommonHeader(k, v) + } + return c +} + +// SetCommonHeader set a header for requests fired from the client. +func (c *Client) SetCommonHeader(key, value string) *Client { + if c.Headers == nil { + c.Headers = make(http.Header) + } + c.Headers.Set(key, value) + return c +} + +// SetCommonHeaderNonCanonical set a header for requests fired from +// the client which key is a non-canonical key (keep case unchanged), +// only valid for HTTP/1.1. +func (c *Client) SetCommonHeaderNonCanonical(key, value string) *Client { + if c.Headers == nil { + c.Headers = make(http.Header) + } + c.Headers[key] = append(c.Headers[key], value) + return c +} + +// SetCommonHeadersNonCanonical set headers for requests fired from the +// client which key is a non-canonical key (keep case unchanged), only +// valid for HTTP/1.1. +func (c *Client) SetCommonHeadersNonCanonical(hdrs map[string]string) *Client { + for k, v := range hdrs { + c.SetCommonHeaderNonCanonical(k, v) + } + return c +} + +// SetCommonHeaderOrder set the order of the http header requests fired from the +// client (case-insensitive). +// For example: +// +// client.R().SetCommonHeaderOrder( +// "custom-header", +// "cookie", +// "user-agent", +// "accept-encoding", +// ).Get(url +func (c *Client) SetCommonHeaderOrder(keys ...string) *Client { + c.Transport.WrapRoundTripFunc(func(rt http.RoundTripper) HttpRoundTripFunc { + return func(req *http.Request) (resp *http.Response, err error) { + if req.Header == nil { + req.Header = make(http.Header) + } + req.Header[HeaderOderKey] = keys + return rt.RoundTrip(req) + } + }) + return c +} + +// SetCommonPseudoHeaderOder set the order of the pseudo http header requests fired +// from the client (case-insensitive). +// Note this is only valid for http2 and http3. +// For example: +// +// client.SetCommonPseudoHeaderOder( +// ":scheme", +// ":authority", +// ":path", +// ":method", +// ) +func (c *Client) SetCommonPseudoHeaderOder(keys ...string) *Client { + c.Transport.WrapRoundTripFunc(func(rt http.RoundTripper) HttpRoundTripFunc { + return func(req *http.Request) (resp *http.Response, err error) { + if req.Header == nil { + req.Header = make(http.Header) + } + req.Header[PseudoHeaderOderKey] = keys + return rt.RoundTrip(req) + } + }) + return c +} + +// SetHTTP2SettingsFrame set the ordered http2 settings frame. +func (c *Client) SetHTTP2SettingsFrame(settings ...http2.Setting) *Client { + c.Transport.SetHTTP2SettingsFrame(settings...) + return c +} + +// SetHTTP2ConnectionFlow set the default http2 connection flow, which is the increment +// value of initial WINDOW_UPDATE frame. +func (c *Client) SetHTTP2ConnectionFlow(flow uint32) *Client { + c.Transport.SetHTTP2ConnectionFlow(flow) + return c +} + +// SetHTTP2HeaderPriority set the header priority param. 
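+//
+// For example, the priority value used by Chrome impersonation (see client_impersonate.go):
+//
+//	client.SetHTTP2HeaderPriority(http2.PriorityParam{
+//		StreamDep: 0,
+//		Exclusive: true,
+//		Weight:    255,
+//	})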
+func (c *Client) SetHTTP2HeaderPriority(priority http2.PriorityParam) *Client { + c.Transport.SetHTTP2HeaderPriority(priority) + return c +} + +// SetHTTP2PriorityFrames set the ordered http2 priority frames. +func (c *Client) SetHTTP2PriorityFrames(frames ...http2.PriorityFrame) *Client { + c.Transport.SetHTTP2PriorityFrames(frames...) + return c +} + +// SetCommonContentType set the `Content-Type` header for requests fired +// from the client. +func (c *Client) SetCommonContentType(ct string) *Client { + c.SetCommonHeader(header.ContentType, ct) + return c +} + +// DisableDumpAll disable dump for requests fired from the client. +func (c *Client) DisableDumpAll() *Client { + c.DisableDump() + return c +} + +// SetCommonDumpOptions configures the underlying Transport's DumpOptions +// for requests fired from the client. +func (c *Client) SetCommonDumpOptions(opt *DumpOptions) *Client { + if opt == nil { + return c + } + if opt.Output == nil { + if c.dumpOptions != nil { + opt.Output = c.dumpOptions.Output + } else { + opt.Output = os.Stdout + } + } + c.dumpOptions = opt + if c.Dump != nil { + c.Dump.SetOptions(dumpOptions{opt}) + } + return c +} + +// SetProxy set the proxy function. +func (c *Client) SetProxy(proxy func(*http.Request) (*urlpkg.URL, error)) *Client { + c.Transport.SetProxy(proxy) + return c +} + +// OnError set the error hook which will be executed if any error returned, +// even if the occurs before request is sent (e.g. invalid URL). +func (c *Client) OnError(hook ErrorHook) *Client { + c.onError = hook + return c +} + +// OnBeforeRequest add a request middleware which hooks before request sent. +func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client { + c.udBeforeRequest = append(c.udBeforeRequest, m) + return c +} + +// OnAfterResponse add a response middleware which hooks after response received. +func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client { + c.afterResponse = append(c.afterResponse, m) + return c +} + +// SetProxyURL set proxy from the proxy URL. +func (c *Client) SetProxyURL(proxyUrl string) *Client { + if proxyUrl == "" { + c.log.Warnf("ignore empty proxy url in SetProxyURL") + return c + } + u, err := urlpkg.Parse(proxyUrl) + if err != nil { + c.log.Errorf("failed to parse proxy url %s: %v", proxyUrl, err) + return c + } + proxy := http.ProxyURL(u) + c.SetProxy(proxy) + return c +} + +// DisableTraceAll disable trace for requests fired from the client. +func (c *Client) DisableTraceAll() *Client { + c.trace = false + return c +} + +// EnableTraceAll enable trace for requests fired from the client (http3 +// currently does not support trace). +func (c *Client) EnableTraceAll() *Client { + c.trace = true + return c +} + +// SetCookieJar set the cookie jar to the underlying `http.Client`, set to nil if you +// want to disable cookies. +// Note: If you use Client.Clone to clone a new Client, the new client will share the same +// cookie jar as the old Client after cloning. Use SetCookieJarFactory instead if you want +// to create a new CookieJar automatically when cloning a client. +func (c *Client) SetCookieJar(jar http.CookieJar) *Client { + c.cookiejarFactory = nil + c.httpClient.Jar = jar + return c +} + +// GetCookies get cookies from the underlying `http.Client`'s `CookieJar`. 
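+//
+// For example (the URL is a placeholder):
+//
+//	cookies, err := client.GetCookies("https://example.com")
+//	if err != nil {
+//		// cookie jar is not enabled or the url is invalid
+//	}
+//	_ = cookies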
+func (c *Client) GetCookies(url string) ([]*http.Cookie, error) { + if c.httpClient.Jar == nil { + return nil, errors.New("cookie jar is not enabled") + } + u, err := urlpkg.Parse(url) + if err != nil { + return nil, err + } + return c.httpClient.Jar.Cookies(u), nil +} + +// ClearCookies clears all cookies if cookie is enabled, including +// cookies from cookie jar and cookies set by SetCommonCookies. +// Note: The cookie jar will not be cleared if you called SetCookieJar +// instead of SetCookieJarFactory. +func (c *Client) ClearCookies() *Client { + c.initCookieJar() + c.Cookies = nil + return c +} + +// SetJsonMarshal set the JSON marshal function which will be used +// to marshal request body. +func (c *Client) SetJsonMarshal(fn func(v interface{}) ([]byte, error)) *Client { + c.jsonMarshal = fn + return c +} + +// SetJsonUnmarshal set the JSON unmarshal function which will be used +// to unmarshal response body. +func (c *Client) SetJsonUnmarshal(fn func(data []byte, v interface{}) error) *Client { + c.jsonUnmarshal = fn + return c +} + +// SetXmlMarshal set the XML marshal function which will be used +// to marshal request body. +func (c *Client) SetXmlMarshal(fn func(v interface{}) ([]byte, error)) *Client { + c.xmlMarshal = fn + return c +} + +// SetXmlUnmarshal set the XML unmarshal function which will be used +// to unmarshal response body. +func (c *Client) SetXmlUnmarshal(fn func(data []byte, v interface{}) error) *Client { + c.xmlUnmarshal = fn + return c +} + +// SetDialTLS set the customized `DialTLSContext` function to Transport. +// Make sure the returned `conn` implements pkg/tls.Conn if you want your +// customized `conn` supports HTTP2. +func (c *Client) SetDialTLS(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Client { + c.Transport.SetDialTLS(fn) + return c +} + +// SetDial set the customized `DialContext` function to Transport. +func (c *Client) SetDial(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Client { + c.Transport.SetDial(fn) + return c +} + +// SetTLSFingerprintChrome uses tls fingerprint of Chrome browser. +func (c *Client) SetTLSFingerprintChrome() *Client { + return c.SetTLSFingerprint(utls.HelloChrome_Auto) +} + +// SetTLSFingerprintFirefox uses tls fingerprint of Firefox browser. +func (c *Client) SetTLSFingerprintFirefox() *Client { + return c.SetTLSFingerprint(utls.HelloFirefox_Auto) +} + +// SetTLSFingerprintEdge uses tls fingerprint of Edge browser. +func (c *Client) SetTLSFingerprintEdge() *Client { + return c.SetTLSFingerprint(utls.HelloEdge_Auto) +} + +// SetTLSFingerprintQQ uses tls fingerprint of QQ browser. +func (c *Client) SetTLSFingerprintQQ() *Client { + return c.SetTLSFingerprint(utls.HelloQQ_Auto) +} + +// SetTLSFingerprintSafari uses tls fingerprint of Safari browser. +func (c *Client) SetTLSFingerprintSafari() *Client { + return c.SetTLSFingerprint(utls.HelloSafari_Auto) +} + +// SetTLSFingerprint360 uses tls fingerprint of 360 browser. +func (c *Client) SetTLSFingerprint360() *Client { + return c.SetTLSFingerprint(utls.Hello360_Auto) +} + +// SetTLSFingerprintIOS uses tls fingerprint of IOS. +func (c *Client) SetTLSFingerprintIOS() *Client { + return c.SetTLSFingerprint(utls.HelloIOS_Auto) +} + +// SetTLSFingerprintAndroid uses tls fingerprint of Android. +func (c *Client) SetTLSFingerprintAndroid() *Client { + return c.SetTLSFingerprint(utls.HelloAndroid_11_OkHttp) +} + +// SetTLSFingerprintRandomized uses randomized tls fingerprint. 
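+//
+// For example:
+//
+//	client.SetTLSFingerprintRandomized().R().Get(url)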
+func (c *Client) SetTLSFingerprintRandomized() *Client { + return c.SetTLSFingerprint(utls.HelloRandomized) +} + +// uTLSConn is wrapper of UConn which implements the net.Conn interface. +type uTLSConn struct { + *utls.UConn +} + +func (conn *uTLSConn) ConnectionState() tls.ConnectionState { + cs := conn.Conn.ConnectionState() + return tls.ConnectionState{ + Version: cs.Version, + HandshakeComplete: cs.HandshakeComplete, + DidResume: cs.DidResume, + CipherSuite: cs.CipherSuite, + NegotiatedProtocol: cs.NegotiatedProtocol, + NegotiatedProtocolIsMutual: cs.NegotiatedProtocolIsMutual, + ServerName: cs.ServerName, + PeerCertificates: cs.PeerCertificates, + VerifiedChains: cs.VerifiedChains, + SignedCertificateTimestamps: cs.SignedCertificateTimestamps, + OCSPResponse: cs.OCSPResponse, + TLSUnique: cs.TLSUnique, + } +} + +// SetTLSFingerprint set the tls fingerprint for tls handshake, will use utls +// (https://github.com/refraction-networking/utls) to perform the tls handshake, +// which uses the specified clientHelloID to simulate the tls fingerprint. +// Note this is valid for HTTP1 and HTTP2, not HTTP3. +func (c *Client) SetTLSFingerprint(clientHelloID utls.ClientHelloID) *Client { + fn := func(ctx context.Context, addr string, plainConn net.Conn) (conn net.Conn, tlsState *tls.ConnectionState, err error) { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + tlsConfig := c.GetTLSClientConfig() + utlsConfig := &utls.Config{ + ServerName: hostname, + Rand: tlsConfig.Rand, + Time: tlsConfig.Time, + RootCAs: tlsConfig.RootCAs, + NextProtos: tlsConfig.NextProtos, + ClientCAs: tlsConfig.ClientCAs, + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + CipherSuites: tlsConfig.CipherSuites, + SessionTicketsDisabled: tlsConfig.SessionTicketsDisabled, + MinVersion: tlsConfig.MinVersion, + MaxVersion: tlsConfig.MaxVersion, + DynamicRecordSizingDisabled: tlsConfig.DynamicRecordSizingDisabled, + KeyLogWriter: tlsConfig.KeyLogWriter, + } + uconn := &uTLSConn{utls.UClient(plainConn, utlsConfig, clientHelloID)} + err = uconn.HandshakeContext(ctx) + if err != nil { + return + } + cs := uconn.Conn.ConnectionState() + conn = uconn + tlsState = &tls.ConnectionState{ + Version: cs.Version, + HandshakeComplete: cs.HandshakeComplete, + DidResume: cs.DidResume, + CipherSuite: cs.CipherSuite, + NegotiatedProtocol: cs.NegotiatedProtocol, + NegotiatedProtocolIsMutual: cs.NegotiatedProtocolIsMutual, + ServerName: cs.ServerName, + PeerCertificates: cs.PeerCertificates, + VerifiedChains: cs.VerifiedChains, + SignedCertificateTimestamps: cs.SignedCertificateTimestamps, + OCSPResponse: cs.OCSPResponse, + TLSUnique: cs.TLSUnique, + } + return + } + c.Transport.SetTLSHandshake(fn) + return c +} + +// SetTLSHandshake set the custom tls handshake function, only valid for HTTP1 and HTTP2, not HTTP3, +// it specifies an optional dial function for tls handshake, it works even if a proxy is set, can be +// used to customize the tls fingerprint. +func (c *Client) SetTLSHandshake(fn func(ctx context.Context, addr string, plainConn net.Conn) (conn net.Conn, tlsState *tls.ConnectionState, err error)) *Client { + c.Transport.SetTLSHandshake(fn) + return c +} + +// SetTLSHandshakeTimeout set the TLS handshake timeout. +func (c *Client) SetTLSHandshakeTimeout(timeout time.Duration) *Client { + c.Transport.SetTLSHandshakeTimeout(timeout) + return c +} + +// EnableForceHTTP1 enable force using HTTP1 (disabled by default). 
+// +// Attention: This method should not be called when ImpersonateXXX, SetTLSFingerPrint or +// SetTLSHandshake and other methods that will customize the tls handshake are called. +func (c *Client) EnableForceHTTP1() *Client { + c.Transport.EnableForceHTTP1() + return c +} + +// EnableForceHTTP2 enable force using HTTP2 for https requests (disabled by default). +// +// Attention: This method should not be called when ImpersonateXXX, SetTLSFingerPrint or +// SetTLSHandshake and other methods that will customize the tls handshake are called. +func (c *Client) EnableForceHTTP2() *Client { + c.Transport.EnableForceHTTP2() + return c +} + +// EnableForceHTTP3 enable force using HTTP3 for https requests (disabled by default). +// +// Attention: This method should not be called when ImpersonateXXX, SetTLSFingerPrint or +// SetTLSHandshake and other methods that will customize the tls handshake are called. +func (c *Client) EnableForceHTTP3() *Client { + c.Transport.EnableForceHTTP3() + return c +} + +// DisableForceHttpVersion disable force using specified http +// version (disabled by default). +func (c *Client) DisableForceHttpVersion() *Client { + c.Transport.DisableForceHttpVersion() + return c +} + +// EnableH2C enables HTTP/2 over TCP without TLS. +func (c *Client) EnableH2C() *Client { + c.Transport.EnableH2C() + return c +} + +// DisableH2C disables HTTP/2 over TCP without TLS. +func (c *Client) DisableH2C() *Client { + c.Transport.DisableH2C() + return c +} + +// DisableAllowGetMethodPayload disable sending GET method requests with body. +func (c *Client) DisableAllowGetMethodPayload() *Client { + c.AllowGetMethodPayload = false + return c +} + +// EnableAllowGetMethodPayload allows sending GET method requests with body. +func (c *Client) EnableAllowGetMethodPayload() *Client { + c.AllowGetMethodPayload = true + return c +} + +func (c *Client) isPayloadForbid(m string) bool { + return (m == http.MethodGet && !c.AllowGetMethodPayload) || m == http.MethodHead || m == http.MethodOptions +} + +// GetClient returns the underlying `http.Client`. +func (c *Client) GetClient() *http.Client { + return c.httpClient +} + +func (c *Client) getRetryOption() *retryOption { + if c.retryOption == nil { + c.retryOption = newDefaultRetryOption() + } + return c.retryOption +} + +// SetCommonRetryCount enables retry and set the maximum retry count for requests +// fired from the client. +// It will retry infinitely if count is negative. +func (c *Client) SetCommonRetryCount(count int) *Client { + c.getRetryOption().MaxRetries = count + return c +} + +// SetCommonRetryInterval sets the custom GetRetryIntervalFunc for requests fired +// from the client, you can use this to implement your own backoff retry algorithm. +// For example: +// +// req.SetCommonRetryInterval(func(resp *req.Response, attempt int) time.Duration { +// sleep := 0.01 * math.Exp2(float64(attempt)) +// return time.Duration(math.Min(2, sleep)) * time.Second +// }) +func (c *Client) SetCommonRetryInterval(getRetryIntervalFunc GetRetryIntervalFunc) *Client { + c.getRetryOption().GetRetryInterval = getRetryIntervalFunc + return c +} + +// SetCommonRetryFixedInterval set retry to use a fixed interval for requests +// fired from the client. 
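+//
+// For example, retry at most 3 times with a fixed 2-second interval:
+//
+//	client.SetCommonRetryCount(3).
+//		SetCommonRetryFixedInterval(2 * time.Second)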
+func (c *Client) SetCommonRetryFixedInterval(interval time.Duration) *Client { + c.getRetryOption().GetRetryInterval = func(resp *Response, attempt int) time.Duration { + return interval + } + return c +} + +// SetCommonRetryBackoffInterval set retry to use a capped exponential backoff +// with jitter for requests fired from the client. +// https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ +func (c *Client) SetCommonRetryBackoffInterval(min, max time.Duration) *Client { + c.getRetryOption().GetRetryInterval = backoffInterval(min, max) + return c +} + +// SetCommonRetryHook set the retry hook which will be executed before a retry. +// It will override other retry hooks if any been added before. +func (c *Client) SetCommonRetryHook(hook RetryHookFunc) *Client { + c.getRetryOption().RetryHooks = []RetryHookFunc{hook} + return c +} + +// AddCommonRetryHook adds a retry hook for requests fired from the client, +// which will be executed before a retry. +func (c *Client) AddCommonRetryHook(hook RetryHookFunc) *Client { + ro := c.getRetryOption() + ro.RetryHooks = append(ro.RetryHooks, hook) + return c +} + +// SetCommonRetryCondition sets the retry condition, which determines whether the +// request should retry. +// It will override other retry conditions if any been added before. +func (c *Client) SetCommonRetryCondition(condition RetryConditionFunc) *Client { + c.getRetryOption().RetryConditions = []RetryConditionFunc{condition} + return c +} + +// AddCommonRetryCondition adds a retry condition, which determines whether the +// request should retry. +func (c *Client) AddCommonRetryCondition(condition RetryConditionFunc) *Client { + ro := c.getRetryOption() + ro.RetryConditions = append(ro.RetryConditions, condition) + return c +} + +// SetUnixSocket set client to dial connection use unix socket. +// For example: +// +// client.SetUnixSocket("/var/run/custom.sock") +func (c *Client) SetUnixSocket(file string) *Client { + return c.SetDial(func(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", file) + }) +} + +// DisableHTTP3 disables the http3 protocol. +func (c *Client) DisableHTTP3() *Client { + c.Transport.DisableHTTP3() + return c +} + +// EnableHTTP3 enables the http3 protocol. +func (c *Client) EnableHTTP3() *Client { + c.Transport.EnableHTTP3() + return c +} + +// SetHTTP2MaxHeaderListSize set the http2 MaxHeaderListSize, +// which is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to +// send in the initial settings frame. It is how many bytes +// of response headers are allowed. Unlike the http2 spec, zero here +// means to use a default limit (currently 10MB). If you actually +// want to advertise an unlimited value to the peer, Transport +// interprets the highest possible value here (0xffffffff or 1<<32-1) +// to mean no limit. +func (c *Client) SetHTTP2MaxHeaderListSize(max uint32) *Client { + c.Transport.SetHTTP2MaxHeaderListSize(max) + return c +} + +// SetHTTP2StrictMaxConcurrentStreams set the http2 +// StrictMaxConcurrentStreams, which controls whether the +// server's SETTINGS_MAX_CONCURRENT_STREAMS should be respected +// globally. If false, new TCP connections are created to the +// server as needed to keep each under the per-connection +// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the +// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as +// a global limit and callers of RoundTrip block when needed, +// waiting for their turn. 
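+//
+// For example:
+//
+//	client.SetHTTP2StrictMaxConcurrentStreams(true)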
+func (c *Client) SetHTTP2StrictMaxConcurrentStreams(strict bool) *Client { + c.Transport.SetHTTP2StrictMaxConcurrentStreams(strict) + return c +} + +// SetHTTP2ReadIdleTimeout set the http2 ReadIdleTimeout, +// which is the timeout after which a health check using ping +// frame will be carried out if no frame is received on the connection. +// Note that a ping response will is considered a received frame, so if +// there is no other traffic on the connection, the health check will +// be performed every ReadIdleTimeout interval. +// If zero, no health check is performed. +func (c *Client) SetHTTP2ReadIdleTimeout(timeout time.Duration) *Client { + c.Transport.SetHTTP2ReadIdleTimeout(timeout) + return c +} + +// SetHTTP2PingTimeout set the http2 PingTimeout, which is the timeout +// after which the connection will be closed if a response to Ping is +// not received. +// Defaults to 15s +func (c *Client) SetHTTP2PingTimeout(timeout time.Duration) *Client { + c.Transport.SetHTTP2PingTimeout(timeout) + return c +} + +// SetHTTP2WriteByteTimeout set the http2 WriteByteTimeout, which is the +// timeout after which the connection will be closed no data can be written +// to it. The timeout begins when data is available to write, and is +// extended whenever any bytes are written. +func (c *Client) SetHTTP2WriteByteTimeout(timeout time.Duration) *Client { + c.Transport.SetHTTP2WriteByteTimeout(timeout) + return c +} + +// NewClient is the alias of C +func NewClient() *Client { + return C() +} + +// Clone copy and returns the Client +func (c *Client) Clone() *Client { + cc := *c + + // clone Transport + cc.Transport = c.Transport.Clone() + cc.initTransport() + + // clone http.Client + client := *c.httpClient + client.Transport = cc.Transport + cc.httpClient = &client + cc.initCookieJar() + + // clone client middleware + if len(cc.roundTripWrappers) > 0 { + cc.wrappedRoundTrip = roundTripImpl{&cc} + for _, w := range cc.roundTripWrappers { + cc.wrappedRoundTrip = w(cc.wrappedRoundTrip) + } + } + + // clone other fields that may need to be cloned + cc.PathParams = cloneMap(c.PathParams) + cc.QueryParams = cloneUrlValues(c.QueryParams) + cc.FormData = cloneUrlValues(c.FormData) + cc.beforeRequest = cloneSlice(c.beforeRequest) + cc.udBeforeRequest = cloneSlice(c.udBeforeRequest) + cc.afterResponse = cloneSlice(c.afterResponse) + cc.dumpOptions = c.dumpOptions.Clone() + cc.retryOption = c.retryOption.Clone() + return &cc +} + +func memoryCookieJarFactory() *cookiejar.Jar { + jar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + return jar +} + +// C create a new client. +func C() *Client { + t := T() + + httpClient := &http.Client{ + Transport: t, + Timeout: 2 * time.Minute, + } + beforeRequest := []RequestMiddleware{ + parseRequestHeader, + parseRequestCookie, + parseRequestURL, + parseRequestBody, + } + afterResponse := []ResponseMiddleware{ + parseResponseBody, + handleDownload, + } + c := &Client{ + AllowGetMethodPayload: true, + beforeRequest: beforeRequest, + afterResponse: afterResponse, + log: createDefaultLogger(), + httpClient: httpClient, + Transport: t, + jsonMarshal: json.Marshal, + jsonUnmarshal: json.Unmarshal, + xmlMarshal: xml.Marshal, + xmlUnmarshal: xml.Unmarshal, + cookiejarFactory: memoryCookieJarFactory, + } + c.SetRedirectPolicy(DefaultRedirectPolicy()) + c.initCookieJar() + + c.initTransport() + return c +} + +// SetCookieJarFactory set the functional factory of cookie jar, which creates +// cookie jar that store cookies for underlying `http.Client`. 
After client clone, +// the cookie jar of the new client will also be regenerated using this factory +// function. +func (c *Client) SetCookieJarFactory(factory func() *cookiejar.Jar) *Client { + c.cookiejarFactory = factory + c.initCookieJar() + return c +} + +func (c *Client) initCookieJar() { + if c.cookiejarFactory == nil { + return + } + jar := c.cookiejarFactory() + if jar != nil { + c.httpClient.Jar = jar + } +} + +func (c *Client) initTransport() { + c.Debugf = func(format string, v ...interface{}) { + if c.DebugLog { + c.log.Debugf(format, v...) + } + } +} + +// RoundTripper is the interface of req's Client. +type RoundTripper interface { + RoundTrip(*Request) (*Response, error) +} + +// RoundTripFunc is a RoundTripper implementation, which is a simple function. +type RoundTripFunc func(req *Request) (resp *Response, err error) + +// RoundTrip implements RoundTripper. +func (fn RoundTripFunc) RoundTrip(req *Request) (*Response, error) { + return fn(req) +} + +// RoundTripWrapper is client middleware function. +type RoundTripWrapper func(rt RoundTripper) RoundTripper + +// RoundTripWrapperFunc is client middleware function, more convenient than RoundTripWrapper. +type RoundTripWrapperFunc func(rt RoundTripper) RoundTripFunc + +func (f RoundTripWrapperFunc) wrapper() RoundTripWrapper { + return func(rt RoundTripper) RoundTripper { + return f(rt) + } +} + +// WrapRoundTripFunc adds a client middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper. +func (c *Client) WrapRoundTripFunc(funcs ...RoundTripWrapperFunc) *Client { + var wrappers []RoundTripWrapper + for _, fn := range funcs { + wrappers = append(wrappers, fn.wrapper()) + } + return c.WrapRoundTrip(wrappers...) +} + +type roundTripImpl struct { + *Client +} + +func (r roundTripImpl) RoundTrip(req *Request) (resp *Response, err error) { + return r.roundTrip(req) +} + +// WrapRoundTrip adds a client middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper. +func (c *Client) WrapRoundTrip(wrappers ...RoundTripWrapper) *Client { + if len(wrappers) == 0 { + return c + } + if c.wrappedRoundTrip == nil { + c.roundTripWrappers = wrappers + c.wrappedRoundTrip = roundTripImpl{c} + } else { + c.roundTripWrappers = append(c.roundTripWrappers, wrappers...) 
+ } + for _, w := range wrappers { + c.wrappedRoundTrip = w(c.wrappedRoundTrip) + } + return c +} + +// RoundTrip implements RoundTripper +func (c *Client) roundTrip(r *Request) (resp *Response, err error) { + resp = &Response{Request: r} + defer func() { + if err != nil { + resp.Err = err + } else { + err = resp.Err + } + }() + + // setup trace + if r.trace == nil && r.client.trace { + r.trace = &clientTrace{} + } + + ctx := r.ctx + + if r.trace != nil { + ctx = r.trace.createContext(r.Context()) + } + + // setup url and host + var host string + if h := r.getHeader("Host"); h != "" { + host = h // Host header override + } else { + host = r.URL.Host + } + + // setup header + contentLength := int64(len(r.Body)) + + var reqBody io.ReadCloser + if r.GetBody != nil { + reqBody, resp.Err = r.GetBody() + if resp.Err != nil { + return + } + } + req := &http.Request{ + Method: r.Method, + Header: r.Headers.Clone(), + URL: r.URL, + Host: host, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: contentLength, + Body: reqBody, + GetBody: r.GetBody, + Close: r.close, + } + for _, cookie := range r.Cookies { + req.AddCookie(cookie) + } + if r.isSaveResponse && r.downloadCallback != nil { + var wrap wrapResponseBodyFunc = func(rc io.ReadCloser) io.ReadCloser { + return &callbackReader{ + ReadCloser: rc, + callback: func(read int64) { + r.downloadCallback(DownloadInfo{ + Response: resp, + DownloadedSize: read, + }) + }, + lastTime: time.Now(), + interval: r.downloadCallbackInterval, + } + } + if ctx == nil { + ctx = context.Background() + } + ctx = context.WithValue(ctx, wrapResponseBodyKey, wrap) + } + if ctx != nil { + req = req.WithContext(ctx) + } + r.RawRequest = req + r.StartTime = time.Now() + + var httpResponse *http.Response + httpResponse, resp.Err = c.httpClient.Do(r.RawRequest) + resp.Response = httpResponse + + // auto-read response body if possible + if resp.Err == nil && !c.disableAutoReadResponse && !r.isSaveResponse && !r.disableAutoReadResponse && resp.StatusCode > 199 { + resp.ToBytes() + // restore body for re-reads + resp.Body = io.NopCloser(bytes.NewReader(resp.body)) + } + + for _, f := range c.afterResponse { + if e := f(c, resp); e != nil { + resp.Err = e + } + } + return +} diff --git a/client_impersonate.go b/client_impersonate.go new file mode 100644 index 00000000..dfa9235e --- /dev/null +++ b/client_impersonate.go @@ -0,0 +1,324 @@ +package req + +import ( + "crypto/rand" + "encoding/binary" + "math/big" + "strconv" + "strings" + + "github.com/imroc/req/v3/http2" + utls "github.com/refraction-networking/utls" +) + +// Identical for both Blink-based browsers (Chrome, Chromium, etc.) and WebKit-based browsers (Safari, etc.) 
+// Blink implementation: https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/platform/network/form_data_encoder.cc;drc=1d694679493c7b2f7b9df00e967b4f8699321093;l=130 +// WebKit implementation: https://github.com/WebKit/WebKit/blob/47eea119fe9462721e5cc75527a4280c6d5f5214/Source/WebCore/platform/network/FormDataBuilder.cpp#L120 +func webkitMultipartBoundaryFunc() string { + const letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789AB" + + sb := strings.Builder{} + sb.WriteString("----WebKitFormBoundary") + + for i := 0; i < 16; i++ { + index, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)-1))) + if err != nil { + panic(err) + } + + sb.WriteByte(letters[index.Int64()]) + } + + return sb.String() +} + +// Firefox implementation: https://searchfox.org/mozilla-central/source/dom/html/HTMLFormSubmission.cpp#355 +func firefoxMultipartBoundaryFunc() string { + sb := strings.Builder{} + sb.WriteString("-------------------------") + + for i := 0; i < 3; i++ { + var b [8]byte + if _, err := rand.Read(b[:]); err != nil { + panic(err) + } + u32 := binary.LittleEndian.Uint32(b[:]) + s := strconv.FormatUint(uint64(u32), 10) + + sb.WriteString(s) + } + + return sb.String() +} + +var ( + chromeHttp2Settings = []http2.Setting{ + { + ID: http2.SettingHeaderTableSize, + Val: 65536, + }, + { + ID: http2.SettingEnablePush, + Val: 0, + }, + { + ID: http2.SettingMaxConcurrentStreams, + Val: 1000, + }, + { + ID: http2.SettingInitialWindowSize, + Val: 6291456, + }, + { + ID: http2.SettingMaxHeaderListSize, + Val: 262144, + }, + } + + chromePseudoHeaderOrder = []string{ + ":method", + ":authority", + ":scheme", + ":path", + } + + chromeHeaderOrder = []string{ + "host", + "pragma", + "cache-control", + "sec-ch-ua", + "sec-ch-ua-mobile", + "sec-ch-ua-platform", + "upgrade-insecure-requests", + "user-agent", + "accept", + "sec-fetch-site", + "sec-fetch-mode", + "sec-fetch-user", + "sec-fetch-dest", + "referer", + "accept-encoding", + "accept-language", + "cookie", + } + + chromeHeaders = map[string]string{ + "pragma": "no-cache", + "cache-control": "no-cache", + "sec-ch-ua": `"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"`, + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": `"macOS"`, + "upgrade-insecure-requests": "1", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", + "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", + "sec-fetch-site": "none", + "sec-fetch-mode": "navigate", + "sec-fetch-user": "?1", + "sec-fetch-dest": "document", + "accept-language": "zh-CN,zh;q=0.9", + } + + chromeHeaderPriority = http2.PriorityParam{ + StreamDep: 0, + Exclusive: true, + Weight: 255, + } +) + +// ImpersonateChrome impersonates Chrome browser (version 120). +func (c *Client) ImpersonateChrome() *Client { + c. + SetTLSFingerprint(utls.HelloChrome_120). + SetHTTP2SettingsFrame(chromeHttp2Settings...). + SetHTTP2ConnectionFlow(15663105). + SetCommonPseudoHeaderOder(chromePseudoHeaderOrder...). + SetCommonHeaderOrder(chromeHeaderOrder...). + SetCommonHeaders(chromeHeaders). + SetHTTP2HeaderPriority(chromeHeaderPriority). 
+ SetMultipartBoundaryFunc(webkitMultipartBoundaryFunc) + return c +} + +var ( + firefoxHttp2Settings = []http2.Setting{ + { + ID: http2.SettingHeaderTableSize, + Val: 65536, + }, + { + ID: http2.SettingInitialWindowSize, + Val: 131072, + }, + { + ID: http2.SettingMaxFrameSize, + Val: 16384, + }, + } + + firefoxPriorityFrames = []http2.PriorityFrame{ + { + StreamID: 3, + PriorityParam: http2.PriorityParam{ + StreamDep: 0, + Exclusive: false, + Weight: 200, + }, + }, + { + StreamID: 5, + PriorityParam: http2.PriorityParam{ + StreamDep: 0, + Exclusive: false, + Weight: 100, + }, + }, + { + StreamID: 7, + PriorityParam: http2.PriorityParam{ + StreamDep: 0, + Exclusive: false, + Weight: 0, + }, + }, + { + StreamID: 9, + PriorityParam: http2.PriorityParam{ + StreamDep: 7, + Exclusive: false, + Weight: 0, + }, + }, + { + StreamID: 11, + PriorityParam: http2.PriorityParam{ + StreamDep: 3, + Exclusive: false, + Weight: 0, + }, + }, + { + StreamID: 13, + PriorityParam: http2.PriorityParam{ + StreamDep: 0, + Exclusive: false, + Weight: 240, + }, + }, + } + + firefoxPseudoHeaderOrder = []string{ + ":method", + ":path", + ":authority", + ":scheme", + } + + firefoxHeaderOrder = []string{ + "user-agent", + "accept", + "accept-language", + "accept-encoding", + "referer", + "cookie", + "upgrade-insecure-requests", + "sec-fetch-dest", + "sec-fetch-mode", + "sec-fetch-site", + "sec-fetch-user", + "te", + } + + firefoxHeaders = map[string]string{ + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:120.0) Gecko/20100101 Firefox/120.0", + "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8", + "accept-language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2", + "upgrade-insecure-requests": "1", + "sec-fetch-dest": "document", + "sec-fetch-mode": "navigate", + "sec-fetch-site": "same-origin", + "sec-fetch-user": "?1", + //"te": "trailers", + } + + firefoxHeaderPriority = http2.PriorityParam{ + StreamDep: 13, + Exclusive: false, + Weight: 41, + } +) + +// ImpersonateFirefox impersonates Firefox browser (version 120). +func (c *Client) ImpersonateFirefox() *Client { + c. + SetTLSFingerprint(utls.HelloFirefox_120). + SetHTTP2SettingsFrame(firefoxHttp2Settings...). + SetHTTP2ConnectionFlow(12517377). + SetHTTP2PriorityFrames(firefoxPriorityFrames...). + SetCommonPseudoHeaderOder(firefoxPseudoHeaderOrder...). + SetCommonHeaderOrder(firefoxHeaderOrder...). + SetCommonHeaders(firefoxHeaders). + SetHTTP2HeaderPriority(firefoxHeaderPriority). 
+ SetMultipartBoundaryFunc(firefoxMultipartBoundaryFunc) + return c +} + +var ( + safariHttp2Settings = []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: 4194304, + }, + { + ID: http2.SettingMaxConcurrentStreams, + Val: 100, + }, + } + + safariPseudoHeaderOrder = []string{ + ":method", + ":scheme", + ":path", + ":authority", + } + + safariHeaderOrder = []string{ + "accept", + "sec-fetch-site", + "cookie", + "sec-fetch-dest", + "accept-language", + "sec-fetch-mode", + "user-agent", + "referer", + "accept-encoding", + } + + safariHeaders = map[string]string{ + "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "sec-fetch-site": "same-origin", + "sec-fetch-dest": "document", + "accept-language": "zh-CN,zh-Hans;q=0.9", + "sec-fetch-mode": "navigate", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Safari/605.1.15", + } + + safariHeaderPriority = http2.PriorityParam{ + StreamDep: 0, + Exclusive: false, + Weight: 254, + } +) + +// ImpersonateSafari impersonates Safari browser (version 16.6). +func (c *Client) ImpersonateSafari() *Client { + c. + SetTLSFingerprint(utls.HelloSafari_16_0). + SetHTTP2SettingsFrame(safariHttp2Settings...). + SetHTTP2ConnectionFlow(10485760). + SetCommonPseudoHeaderOder(safariPseudoHeaderOrder...). + SetCommonHeaderOrder(safariHeaderOrder...). + SetCommonHeaders(safariHeaders). + SetHTTP2HeaderPriority(safariHeaderPriority). + SetMultipartBoundaryFunc(webkitMultipartBoundaryFunc) + return c +} diff --git a/client_test.go b/client_test.go new file mode 100644 index 00000000..7a6aeebe --- /dev/null +++ b/client_test.go @@ -0,0 +1,698 @@ +package req + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/tests" + "golang.org/x/net/publicsuffix" +) + +func TestRetryCancelledContext(t *testing.T) { + cancelledCtx, done := context.WithCancel(context.Background()) + done() + + client := tc(). + SetCommonRetryCount(2). 
+ SetCommonRetryBackoffInterval(1*time.Second, 5*time.Second) + + res, err := client.R().SetContext(cancelledCtx).Get("/") + + tests.AssertEqual(t, 0, res.Request.RetryAttempt) + tests.AssertNotNil(t, err) + tests.AssertErrorContains(t, err, "context canceled") +} + +func TestWrapRoundTrip(t *testing.T) { + i, j, a, b := 0, 0, 0, 0 + c := tc().WrapRoundTripFunc(func(rt RoundTripper) RoundTripFunc { + return func(req *Request) (resp *Response, err error) { + a = 1 + resp, err = rt.RoundTrip(req) + b = 1 + return + } + }) + c.GetTransport().WrapRoundTripFunc(func(rt http.RoundTripper) HttpRoundTripFunc { + return func(req *http.Request) (resp *http.Response, err error) { + i = 1 + resp, err = rt.RoundTrip(req) + j = 1 + return + } + }) + resp, err := c.R().Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, 1, i) + tests.AssertEqual(t, 1, j) + tests.AssertEqual(t, 1, a) + tests.AssertEqual(t, 1, b) +} + +func TestAllowGetMethodPayload(t *testing.T) { + c := tc() + resp, err := c.R().SetBody("test").Get("/payload") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test", resp.String()) + + c.DisableAllowGetMethodPayload() + resp, err = c.R().SetBody("test").Get("/payload") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "", resp.String()) + + c.EnableAllowGetMethodPayload() + resp, err = c.R().SetBody("test").Get("/payload") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test", resp.String()) +} + +func TestSetTLSHandshakeTimeout(t *testing.T) { + timeout := 2 * time.Second + c := tc().SetTLSHandshakeTimeout(timeout) + tests.AssertEqual(t, timeout, c.TLSHandshakeTimeout) +} + +func TestSetDial(t *testing.T) { + testErr := errors.New("test") + testDial := func(ctx context.Context, network, addr string) (net.Conn, error) { + return nil, testErr + } + c := tc().SetDial(testDial) + _, err := c.DialContext(nil, "", "") + tests.AssertEqual(t, testErr, err) +} + +func TestSetDialTLS(t *testing.T) { + testErr := errors.New("test") + testDialTLS := func(ctx context.Context, network, addr string) (net.Conn, error) { + return nil, testErr + } + c := tc().SetDialTLS(testDialTLS) + _, err := c.DialTLSContext(nil, "", "") + tests.AssertEqual(t, testErr, err) +} + +func TestSetFuncs(t *testing.T) { + testErr := errors.New("test") + marshalFunc := func(v interface{}) ([]byte, error) { + return nil, testErr + } + unmarshalFunc := func(data []byte, v interface{}) error { + return testErr + } + c := tc(). + SetJsonMarshal(marshalFunc). + SetJsonUnmarshal(unmarshalFunc). + SetXmlMarshal(marshalFunc). 
+ SetXmlUnmarshal(unmarshalFunc) + + _, err := c.jsonMarshal(nil) + tests.AssertEqual(t, testErr, err) + err = c.jsonUnmarshal(nil, nil) + tests.AssertEqual(t, testErr, err) + + _, err = c.xmlMarshal(nil) + tests.AssertEqual(t, testErr, err) + err = c.xmlUnmarshal(nil, nil) + tests.AssertEqual(t, testErr, err) +} + +func TestSetCookieJar(t *testing.T) { + c := tc().SetCookieJar(nil) + tests.AssertEqual(t, nil, c.httpClient.Jar) +} + +func TestTraceAll(t *testing.T) { + c := tc().EnableTraceAll() + resp, err := c.R().Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, resp.TraceInfo().TotalTime > 0) + + c.DisableTraceAll() + resp, err = c.R().Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, resp.TraceInfo().TotalTime == 0) +} + +func TestOnAfterResponse(t *testing.T) { + c := tc() + len1 := len(c.afterResponse) + c.OnAfterResponse(func(client *Client, response *Response) error { + return nil + }) + len2 := len(c.afterResponse) + tests.AssertEqual(t, true, len1+1 == len2) +} + +func TestOnBeforeRequest(t *testing.T) { + c := tc().OnBeforeRequest(func(client *Client, request *Request) error { + return nil + }) + tests.AssertEqual(t, true, len(c.udBeforeRequest) == 1) +} + +func TestSetProxyURL(t *testing.T) { + c := tc().SetProxyURL("http://dummy.proxy.local") + u, err := c.Proxy(nil) + tests.AssertNoError(t, err) + tests.AssertEqual(t, "http://dummy.proxy.local", u.String()) +} + +func TestSetProxy(t *testing.T) { + u, _ := url.Parse("http://dummy.proxy.local") + proxy := http.ProxyURL(u) + c := tc().SetProxy(proxy) + uu, err := c.Proxy(nil) + tests.AssertNoError(t, err) + tests.AssertEqual(t, u.String(), uu.String()) +} + +func TestSetCommonContentType(t *testing.T) { + c := tc().SetCommonContentType(header.JsonContentType) + tests.AssertEqual(t, header.JsonContentType, c.Headers.Get(header.ContentType)) +} + +func TestSetCommonHeader(t *testing.T) { + c := tc().SetCommonHeader("my-header", "my-value") + tests.AssertEqual(t, "my-value", c.Headers.Get("my-header")) +} + +func TestSetCommonHeaderNonCanonical(t *testing.T) { + c := tc().SetCommonHeaderNonCanonical("my-Header", "my-value") + tests.AssertEqual(t, "my-value", c.Headers["my-Header"][0]) +} + +func TestSetCommonHeaders(t *testing.T) { + c := tc().SetCommonHeaders(map[string]string{ + "header1": "value1", + "header2": "value2", + }) + tests.AssertEqual(t, "value1", c.Headers.Get("header1")) + tests.AssertEqual(t, "value2", c.Headers.Get("header2")) +} + +func TestSetCommonHeadersNonCanonical(t *testing.T) { + c := tc().SetCommonHeadersNonCanonical(map[string]string{ + "my-Header": "my-value", + }) + tests.AssertEqual(t, "my-value", c.Headers["my-Header"][0]) +} + +func TestSetCommonBasicAuth(t *testing.T) { + c := tc().SetCommonBasicAuth("imroc", "123456") + tests.AssertEqual(t, "Basic aW1yb2M6MTIzNDU2", c.Headers.Get("Authorization")) +} + +func TestSetCommonBearerAuthToken(t *testing.T) { + c := tc().SetCommonBearerAuthToken("123456") + tests.AssertEqual(t, "Bearer 123456", c.Headers.Get("Authorization")) +} + +func TestSetUserAgent(t *testing.T) { + c := tc().SetUserAgent("test") + tests.AssertEqual(t, "test", c.Headers.Get(header.UserAgent)) +} + +func TestAutoDecode(t *testing.T) { + c := tc().DisableAutoDecode() + resp, err := c.R().Get("/gbk") + assertSuccess(t, resp, err) + tests.AssertEqual(t, toGbk("鎴戞槸roc"), resp.Bytes()) + + resp, err = c.EnableAutoDecode().R().Get("/gbk") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "鎴戞槸roc", resp.String()) + + resp, err = 
c.SetAutoDecodeContentType("html").R().Get("/gbk") + assertSuccess(t, resp, err) + tests.AssertEqual(t, toGbk("鎴戞槸roc"), resp.Bytes()) + resp, err = c.SetAutoDecodeContentType("text").R().Get("/gbk") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "鎴戞槸roc", resp.String()) + resp, err = c.SetAutoDecodeContentTypeFunc(func(contentType string) bool { + return strings.Contains(contentType, "text") + }).R().Get("/gbk") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "鎴戞槸roc", resp.String()) + + resp, err = c.SetAutoDecodeAllContentType().R().Get("/gbk-no-charset") + assertSuccess(t, resp, err) + tests.AssertContains(t, resp.String(), "鎴戞槸roc", true) +} + +func TestSetTimeout(t *testing.T) { + timeout := 100 * time.Second + c := tc().SetTimeout(timeout) + tests.AssertEqual(t, timeout, c.httpClient.Timeout) +} + +func TestSetLogger(t *testing.T) { + l := createDefaultLogger() + c := tc().SetLogger(l) + tests.AssertEqual(t, l, c.log) + + c.SetLogger(nil) + tests.AssertEqual(t, &disableLogger{}, c.log) +} + +func TestSetScheme(t *testing.T) { + c := tc().SetScheme("https") + tests.AssertEqual(t, "https", c.scheme) +} + +func TestDebugLog(t *testing.T) { + c := tc().EnableDebugLog() + tests.AssertEqual(t, true, c.DebugLog) + + c.DisableDebugLog() + tests.AssertEqual(t, false, c.DebugLog) +} + +func TestSetCommonCookies(t *testing.T) { + headers := make(http.Header) + resp, err := tc().SetCommonCookies(&http.Cookie{ + Name: "test", + Value: "test", + }).R().SetSuccessResult(&headers).Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=test", headers.Get("Cookie")) +} + +func TestSetCommonQueryString(t *testing.T) { + resp, err := tc().SetCommonQueryString("test=test").R().Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=test", resp.String()) +} + +func TestSetCommonPathParams(t *testing.T) { + c := tc().SetCommonPathParams(map[string]string{"test": "test"}) + tests.AssertNotNil(t, c.PathParams) + tests.AssertEqual(t, "test", c.PathParams["test"]) +} + +func TestSetCommonPathParam(t *testing.T) { + c := tc().SetCommonPathParam("test", "test") + tests.AssertNotNil(t, c.PathParams) + tests.AssertEqual(t, "test", c.PathParams["test"]) +} + +func TestAddCommonQueryParam(t *testing.T) { + resp, err := tc(). + AddCommonQueryParam("test", "1"). + AddCommonQueryParam("test", "2"). 
+ R().Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=1&test=2", resp.String()) +} + +func TestSetCommonQueryParam(t *testing.T) { + resp, err := tc().SetCommonQueryParam("test", "test").R().Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=test", resp.String()) +} + +func TestSetCommonQueryParams(t *testing.T) { + resp, err := tc().SetCommonQueryParams(map[string]string{"test": "test"}).R().Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=test", resp.String()) +} + +func TestInsecureSkipVerify(t *testing.T) { + c := tc().EnableInsecureSkipVerify() + tests.AssertEqual(t, true, c.TLSClientConfig.InsecureSkipVerify) + + c.DisableInsecureSkipVerify() + tests.AssertEqual(t, false, c.TLSClientConfig.InsecureSkipVerify) +} + +func TestSetTLSClientConfig(t *testing.T) { + config := &tls.Config{InsecureSkipVerify: true} + c := tc().SetTLSClientConfig(config) + tests.AssertEqual(t, config, c.TLSClientConfig) +} + +func TestCompression(t *testing.T) { + c := tc().DisableCompression() + tests.AssertEqual(t, true, c.Transport.DisableCompression) + + c.EnableCompression() + tests.AssertEqual(t, false, c.Transport.DisableCompression) +} + +func TestKeepAlives(t *testing.T) { + c := tc().DisableKeepAlives() + tests.AssertEqual(t, true, c.Transport.DisableKeepAlives) + + c.EnableKeepAlives() + tests.AssertEqual(t, false, c.Transport.DisableKeepAlives) +} + +func TestRedirect(t *testing.T) { + _, err := tc().SetRedirectPolicy(NoRedirectPolicy()).R().Get("/unlimited-redirect") + tests.AssertIsNil(t, err) + + _, err = tc().SetRedirectPolicy(MaxRedirectPolicy(3)).R().Get("/unlimited-redirect") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "stopped after 3 redirects", true) + + _, err = tc().SetRedirectPolicy(MaxRedirectPolicy(20)).SetRedirectPolicy(DefaultRedirectPolicy()).R().Get("/unlimited-redirect") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "stopped after 10 redirects", true) + + _, err = tc().SetRedirectPolicy(SameDomainRedirectPolicy()).R().Get("/redirect-to-other") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "different domain name is not allowed", true) + + _, err = tc().SetRedirectPolicy(SameHostRedirectPolicy()).R().Get("/redirect-to-other") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "different host name is not allowed", true) + + _, err = tc().SetRedirectPolicy(AllowedHostRedirectPolicy("localhost", "127.0.0.1")).R().Get("/redirect-to-other") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "redirect host [dummy.local] is not allowed", true) + + _, err = tc().SetRedirectPolicy(AllowedDomainRedirectPolicy("localhost", "127.0.0.1")).R().Get("/redirect-to-other") + tests.AssertNotNil(t, err) + tests.AssertContains(t, err.Error(), "redirect domain [dummy.local] is not allowed", true) + + c := tc().SetRedirectPolicy(AlwaysCopyHeaderRedirectPolicy("Authorization")) + newHeader := make(http.Header) + oldHeader := make(http.Header) + oldHeader.Set("Authorization", "test") + c.GetClient().CheckRedirect(&http.Request{ + Header: newHeader, + }, []*http.Request{&http.Request{ + Header: oldHeader, + }}) + tests.AssertEqual(t, "test", newHeader.Get("Authorization")) +} + +func TestGetTLSClientConfig(t *testing.T) { + c := tc() + config := c.GetTLSClientConfig() + tests.AssertEqual(t, true, c.TLSClientConfig != nil) + tests.AssertEqual(t, config, c.TLSClientConfig) +} + +func 
TestSetRootCertFromFile(t *testing.T) { + c := tc().SetRootCertsFromFile(tests.GetTestFilePath("sample-root.pem")) + tests.AssertEqual(t, true, c.TLSClientConfig.RootCAs != nil) +} + +func TestSetRootCertFromString(t *testing.T) { + c := tc().SetRootCertFromString(string(getTestFileContent(t, "sample-root.pem"))) + tests.AssertEqual(t, true, c.TLSClientConfig.RootCAs != nil) +} + +func TestSetCerts(t *testing.T) { + c := tc().SetCerts(tls.Certificate{}, tls.Certificate{}) + tests.AssertEqual(t, true, len(c.TLSClientConfig.Certificates) == 2) +} + +func TestSetCertFromFile(t *testing.T) { + c := tc().SetCertFromFile( + tests.GetTestFilePath("sample-client.pem"), + tests.GetTestFilePath("sample-client-key.pem"), + ) + tests.AssertEqual(t, true, len(c.TLSClientConfig.Certificates) == 1) +} + +func TestSetOutputDirectory(t *testing.T) { + outFile := "test_output_dir" + resp, err := tc(). + SetOutputDirectory(testDataPath). + R().SetOutputFile(outFile). + Get("/") + assertSuccess(t, resp, err) + content := string(getTestFileContent(t, outFile)) + os.Remove(tests.GetTestFilePath(outFile)) + tests.AssertEqual(t, "TestGet: text response", content) +} + +func TestSetBaseURL(t *testing.T) { + baseURL := "http://dummy-req.local/test" + resp, _ := tc().SetTimeout(time.Nanosecond).SetBaseURL(baseURL).R().Get("/req") + tests.AssertEqual(t, baseURL+"/req", resp.Request.RawRequest.URL.String()) +} + +func TestSetCommonFormDataFromValues(t *testing.T) { + expectedForm := make(url.Values) + gotForm := make(url.Values) + expectedForm.Set("test", "test") + resp, err := tc(). + SetCommonFormDataFromValues(expectedForm). + R().SetSuccessResult(&gotForm). + Post("/form") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test", gotForm.Get("test")) +} + +func TestSetCommonFormData(t *testing.T) { + form := make(url.Values) + resp, err := tc(). + SetCommonFormData( + map[string]string{ + "test": "test", + }).R(). + SetSuccessResult(&form). + Post("/form") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test", form.Get("test")) +} + +func TestSetMultipartBoundaryFunc(t *testing.T) { + delimiter := "test-delimiter" + expectedContentType := fmt.Sprintf("multipart/form-data; boundary=%s", delimiter) + resp, err := tc(). + SetMultipartBoundaryFunc(func() string { + return delimiter + }).R(). + EnableForceMultipart(). + SetFormData( + map[string]string{ + "test": "test", + }). + Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, expectedContentType, resp.String()) +} + +func TestFirefoxMultipartBoundaryFunc(t *testing.T) { + r := regexp.MustCompile(`^-------------------------\d{1,10}\d{1,10}\d{1,10}$`) + b := firefoxMultipartBoundaryFunc() + tests.AssertEqual(t, true, r.MatchString(b)) +} + +func TestWebkitMultipartBoundaryFunc(t *testing.T) { + r := regexp.MustCompile(`^----WebKitFormBoundary[0-9a-zA-Z]{16}$`) + b := webkitMultipartBoundaryFunc() + tests.AssertEqual(t, true, r.MatchString(b)) +} + +func TestClientClone(t *testing.T) { + c1 := tc().DevMode(). + SetCommonHeader("test", "test"). + SetCommonCookies(&http.Cookie{ + Name: "test", + Value: "test", + }).SetCommonQueryParam("test", "test"). + SetCommonPathParam("test", "test"). + SetCommonRetryCount(2). + SetCommonFormData(map[string]string{"test": "test"}). 
+ OnBeforeRequest(func(c *Client, r *Request) error { return nil }) + + c2 := c1.Clone() + assertClone(t, c1, c2) +} + +func TestDisableAutoReadResponse(t *testing.T) { + testWithAllTransport(t, testDisableAutoReadResponse) +} + +func testDisableAutoReadResponse(t *testing.T, c *Client) { + c.DisableAutoReadResponse() + resp, err := c.R().Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "", resp.String()) + result, err := resp.ToString() + tests.AssertNoError(t, err) + tests.AssertEqual(t, "TestGet: text response", result) + + resp, err = c.R().Get("/") + assertSuccess(t, resp, err) + _, err = io.ReadAll(resp.Body) + tests.AssertNoError(t, err) +} + +func testEnableDumpAll(t *testing.T, fn func(c *Client) (de dumpExpected)) { + testDump := func(c *Client) { + buff := new(bytes.Buffer) + c.EnableDumpAllTo(buff) + r := c.R() + de := fn(c) + resp, err := r.SetBody(`test body`).Post("/") + assertSuccess(t, resp, err) + dump := buff.String() + tests.AssertContains(t, dump, "user-agent", de.ReqHeader) + tests.AssertContains(t, dump, "test body", de.ReqBody) + tests.AssertContains(t, dump, "date", de.RespHeader) + tests.AssertContains(t, dump, "testpost: text response", de.RespBody) + } + c := tc() + testDump(c) + testDump(c.EnableForceHTTP1()) +} + +func TestEnableDumpAll(t *testing.T) { + testCases := []func(c *Client) (d dumpExpected){ + func(c *Client) (de dumpExpected) { + c.EnableDumpAll() + de.ReqHeader = true + de.ReqBody = true + de.RespHeader = true + de.RespBody = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutHeader() + de.ReqBody = true + de.RespBody = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutBody() + de.ReqHeader = true + de.RespHeader = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutRequest() + de.RespHeader = true + de.RespBody = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutRequestBody() + de.ReqHeader = true + de.RespHeader = true + de.RespBody = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutResponse() + de.ReqHeader = true + de.ReqBody = true + return + }, + func(c *Client) (de dumpExpected) { + c.EnableDumpAllWithoutResponseBody() + de.ReqHeader = true + de.ReqBody = true + de.RespHeader = true + return + }, + func(c *Client) (de dumpExpected) { + c.SetCommonDumpOptions(&DumpOptions{ + RequestHeader: true, + RequestBody: true, + ResponseBody: true, + }).EnableDumpAll() + de.ReqHeader = true + de.ReqBody = true + de.RespBody = true + return + }, + } + for _, fn := range testCases { + testEnableDumpAll(t, fn) + } +} + +func TestEnableDumpAllToFile(t *testing.T) { + c := tc() + dumpFile := "tmp_test_dump_file" + c.EnableDumpAllToFile(tests.GetTestFilePath(dumpFile)) + resp, err := c.R().SetBody("test body").Post("/") + assertSuccess(t, resp, err) + dump := string(getTestFileContent(t, dumpFile)) + os.Remove(tests.GetTestFilePath(dumpFile)) + tests.AssertContains(t, dump, "user-agent", true) + tests.AssertContains(t, dump, "test body", true) + tests.AssertContains(t, dump, "date", true) + tests.AssertContains(t, dump, "testpost: text response", true) +} + +func TestEnableDumpAllAsync(t *testing.T) { + c := tc() + buf := new(bytes.Buffer) + c.EnableDumpAllTo(buf).EnableDumpAllAsync() + tests.AssertEqual(t, true, c.getDumpOptions().Async) +} + +func TestSetResponseBodyTransformer(t *testing.T) { + c := tc().SetResponseBodyTransformer(func(rawBody []byte, req *Request, resp *Response) 
(transformedBody []byte, err error) { + if resp.IsSuccessState() { + result, err := url.QueryUnescape(string(rawBody)) + return []byte(result), err + } + return rawBody, nil + }) + user := &UserInfo{} + resp, err := c.R().SetSuccessResult(user).Get("/urlencode") + assertSuccess(t, resp, err) + tests.AssertEqual(t, user.Username, "鎴戞槸roc") + tests.AssertEqual(t, user.Email, "roc@imroc.cc") +} + +func TestSetResultStateCheckFunc(t *testing.T) { + c := tc().SetResultStateCheckFunc(func(resp *Response) ResultState { + if resp.StatusCode == http.StatusOK { + return SuccessState + } else { + return ErrorState + } + }) + resp, err := c.R().Get("/status?code=200") + tests.AssertNoError(t, err) + tests.AssertEqual(t, SuccessState, resp.ResultState()) + + resp, err = c.R().Get("/status?code=201") + tests.AssertNoError(t, err) + tests.AssertEqual(t, ErrorState, resp.ResultState()) + + resp, err = c.R().Get("/status?code=399") + tests.AssertNoError(t, err) + tests.AssertEqual(t, ErrorState, resp.ResultState()) + + resp, err = c.R().Get("/status?code=404") + tests.AssertNoError(t, err) + tests.AssertEqual(t, ErrorState, resp.ResultState()) +} +func TestCloneCookieJar(t *testing.T) { + c1 := C() + c2 := c1.Clone() + tests.AssertEqual(t, true, c1.httpClient.Jar != c2.httpClient.Jar) + + jar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + c1.SetCookieJar(jar) + c2 = c1.Clone() + tests.AssertEqual(t, true, c1.httpClient.Jar == c2.httpClient.Jar) + + c2.SetCookieJar(nil) + tests.AssertEqual(t, true, c2.cookiejarFactory == nil) + tests.AssertEqual(t, true, c2.httpClient.Jar == nil) +} diff --git a/client_wrapper.go b/client_wrapper.go new file mode 100644 index 00000000..1e773d97 --- /dev/null +++ b/client_wrapper.go @@ -0,0 +1,808 @@ +package req + +import ( + "context" + "crypto/tls" + "io" + "net" + "net/http" + "net/url" + "time" + + "github.com/imroc/req/v3/http2" + utls "github.com/refraction-networking/utls" +) + +// WrapRoundTrip is a global wrapper methods which delegated +// to the default client's Client.WrapRoundTrip. +func WrapRoundTrip(wrappers ...RoundTripWrapper) *Client { + return defaultClient.WrapRoundTrip(wrappers...) +} + +// WrapRoundTripFunc is a global wrapper methods which delegated +// to the default client's Client.WrapRoundTripFunc. +func WrapRoundTripFunc(funcs ...RoundTripWrapperFunc) *Client { + return defaultClient.WrapRoundTripFunc(funcs...) +} + +// SetCommonError is a global wrapper methods which delegated +// to the default client's Client.SetCommonErrorResult. +// +// Deprecated: Use SetCommonErrorResult instead. +func SetCommonError(err interface{}) *Client { + return defaultClient.SetCommonErrorResult(err) +} + +// SetCommonErrorResult is a global wrapper methods which delegated +// to the default client's Client.SetCommonError. +func SetCommonErrorResult(err interface{}) *Client { + return defaultClient.SetCommonErrorResult(err) +} + +// SetResultStateCheckFunc is a global wrapper methods which delegated +// to the default client's Client.SetCommonResultStateCheckFunc. +func SetResultStateCheckFunc(fn func(resp *Response) ResultState) *Client { + return defaultClient.SetResultStateCheckFunc(fn) +} + +// SetCommonFormDataFromValues is a global wrapper methods which delegated +// to the default client's Client.SetCommonFormDataFromValues. 
+func SetCommonFormDataFromValues(data url.Values) *Client { + return defaultClient.SetCommonFormDataFromValues(data) +} + +// SetCommonFormData is a global wrapper methods which delegated +// to the default client's Client.SetCommonFormData. +func SetCommonFormData(data map[string]string) *Client { + return defaultClient.SetCommonFormData(data) +} + +// SetMultipartBoundaryFunc is a global wrapper methods which delegated +// to the default client's Client.SetMultipartBoundaryFunc. +func SetMultipartBoundaryFunc(fn func() string) *Client { + return defaultClient.SetMultipartBoundaryFunc(fn) +} + +// SetBaseURL is a global wrapper methods which delegated +// to the default client's Client.SetBaseURL. +func SetBaseURL(u string) *Client { + return defaultClient.SetBaseURL(u) +} + +// SetOutputDirectory is a global wrapper methods which delegated +// to the default client's Client.SetOutputDirectory. +func SetOutputDirectory(dir string) *Client { + return defaultClient.SetOutputDirectory(dir) +} + +// SetCertFromFile is a global wrapper methods which delegated +// to the default client's Client.SetCertFromFile. +func SetCertFromFile(certFile, keyFile string) *Client { + return defaultClient.SetCertFromFile(certFile, keyFile) +} + +// SetCerts is a global wrapper methods which delegated +// to the default client's Client.SetCerts. +func SetCerts(certs ...tls.Certificate) *Client { + return defaultClient.SetCerts(certs...) +} + +// SetRootCertFromString is a global wrapper methods which delegated +// to the default client's Client.SetRootCertFromString. +func SetRootCertFromString(pemContent string) *Client { + return defaultClient.SetRootCertFromString(pemContent) +} + +// SetRootCertsFromFile is a global wrapper methods which delegated +// to the default client's Client.SetRootCertsFromFile. +func SetRootCertsFromFile(pemFiles ...string) *Client { + return defaultClient.SetRootCertsFromFile(pemFiles...) +} + +// GetTLSClientConfig is a global wrapper methods which delegated +// to the default client's Client.GetTLSClientConfig. +func GetTLSClientConfig() *tls.Config { + return defaultClient.GetTLSClientConfig() +} + +// SetRedirectPolicy is a global wrapper methods which delegated +// to the default client's Client.SetRedirectPolicy. +func SetRedirectPolicy(policies ...RedirectPolicy) *Client { + return defaultClient.SetRedirectPolicy(policies...) +} + +// DisableKeepAlives is a global wrapper methods which delegated +// to the default client's Client.DisableKeepAlives. +func DisableKeepAlives() *Client { + return defaultClient.DisableKeepAlives() +} + +// EnableKeepAlives is a global wrapper methods which delegated +// to the default client's Client.EnableKeepAlives. +func EnableKeepAlives() *Client { + return defaultClient.EnableKeepAlives() +} + +// DisableCompression is a global wrapper methods which delegated +// to the default client's Client.DisableCompression. +func DisableCompression() *Client { + return defaultClient.DisableCompression() +} + +// EnableCompression is a global wrapper methods which delegated +// to the default client's Client.EnableCompression. +func EnableCompression() *Client { + return defaultClient.EnableCompression() +} + +// SetTLSClientConfig is a global wrapper methods which delegated +// to the default client's Client.SetTLSClientConfig. 
+func SetTLSClientConfig(conf *tls.Config) *Client { + return defaultClient.SetTLSClientConfig(conf) +} + +// EnableInsecureSkipVerify is a global wrapper methods which delegated +// to the default client's Client.EnableInsecureSkipVerify. +func EnableInsecureSkipVerify() *Client { + return defaultClient.EnableInsecureSkipVerify() +} + +// DisableInsecureSkipVerify is a global wrapper methods which delegated +// to the default client's Client.DisableInsecureSkipVerify. +func DisableInsecureSkipVerify() *Client { + return defaultClient.DisableInsecureSkipVerify() +} + +// SetCommonQueryParams is a global wrapper methods which delegated +// to the default client's Client.SetCommonQueryParams. +func SetCommonQueryParams(params map[string]string) *Client { + return defaultClient.SetCommonQueryParams(params) +} + +// AddCommonQueryParam is a global wrapper methods which delegated +// to the default client's Client.AddCommonQueryParam. +func AddCommonQueryParam(key, value string) *Client { + return defaultClient.AddCommonQueryParam(key, value) +} + +// AddCommonQueryParams is a global wrapper methods which delegated +// to the default client's Client.AddCommonQueryParams. +func AddCommonQueryParams(key string, values ...string) *Client { + return defaultClient.AddCommonQueryParams(key, values...) +} + +// SetCommonPathParam is a global wrapper methods which delegated +// to the default client's Client.SetCommonPathParam. +func SetCommonPathParam(key, value string) *Client { + return defaultClient.SetCommonPathParam(key, value) +} + +// SetCommonPathParams is a global wrapper methods which delegated +// to the default client's Client.SetCommonPathParams. +func SetCommonPathParams(pathParams map[string]string) *Client { + return defaultClient.SetCommonPathParams(pathParams) +} + +// SetCommonQueryParam is a global wrapper methods which delegated +// to the default client's Client.SetCommonQueryParam. +func SetCommonQueryParam(key, value string) *Client { + return defaultClient.SetCommonQueryParam(key, value) +} + +// SetCommonQueryString is a global wrapper methods which delegated +// to the default client's Client.SetCommonQueryString. +func SetCommonQueryString(query string) *Client { + return defaultClient.SetCommonQueryString(query) +} + +// SetCommonCookies is a global wrapper methods which delegated +// to the default client's Client.SetCommonCookies. +func SetCommonCookies(cookies ...*http.Cookie) *Client { + return defaultClient.SetCommonCookies(cookies...) +} + +// DisableDebugLog is a global wrapper methods which delegated +// to the default client's Client.DisableDebugLog. +func DisableDebugLog() *Client { + return defaultClient.DisableDebugLog() +} + +// EnableDebugLog is a global wrapper methods which delegated +// to the default client's Client.EnableDebugLog. +func EnableDebugLog() *Client { + return defaultClient.EnableDebugLog() +} + +// DevMode is a global wrapper methods which delegated +// to the default client's Client.DevMode. +func DevMode() *Client { + return defaultClient.DevMode() +} + +// SetScheme is a global wrapper methods which delegated +// to the default client's Client.SetScheme. +func SetScheme(scheme string) *Client { + return defaultClient.SetScheme(scheme) +} + +// SetLogger is a global wrapper methods which delegated +// to the default client's Client.SetLogger. +func SetLogger(log Logger) *Client { + return defaultClient.SetLogger(log) +} + +// SetTimeout is a global wrapper methods which delegated +// to the default client's Client.SetTimeout. 
+func SetTimeout(d time.Duration) *Client { + return defaultClient.SetTimeout(d) +} + +// EnableDumpAll is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAll. +func EnableDumpAll() *Client { + return defaultClient.EnableDumpAll() +} + +// EnableDumpAllToFile is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllToFile. +func EnableDumpAllToFile(filename string) *Client { + return defaultClient.EnableDumpAllToFile(filename) +} + +// EnableDumpAllTo is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllTo. +func EnableDumpAllTo(output io.Writer) *Client { + return defaultClient.EnableDumpAllTo(output) +} + +// EnableDumpAllAsync is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllAsync. +func EnableDumpAllAsync() *Client { + return defaultClient.EnableDumpAllAsync() +} + +// EnableDumpAllWithoutRequestBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutRequestBody. +func EnableDumpAllWithoutRequestBody() *Client { + return defaultClient.EnableDumpAllWithoutRequestBody() +} + +// EnableDumpAllWithoutResponseBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutResponseBody. +func EnableDumpAllWithoutResponseBody() *Client { + return defaultClient.EnableDumpAllWithoutResponseBody() +} + +// EnableDumpAllWithoutResponse is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutResponse. +func EnableDumpAllWithoutResponse() *Client { + return defaultClient.EnableDumpAllWithoutResponse() +} + +// EnableDumpAllWithoutRequest is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutRequest. +func EnableDumpAllWithoutRequest() *Client { + return defaultClient.EnableDumpAllWithoutRequest() +} + +// EnableDumpAllWithoutHeader is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutHeader. +func EnableDumpAllWithoutHeader() *Client { + return defaultClient.EnableDumpAllWithoutHeader() +} + +// EnableDumpAllWithoutBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpAllWithoutBody. +func EnableDumpAllWithoutBody() *Client { + return defaultClient.EnableDumpAllWithoutBody() +} + +// EnableDumpEachRequest is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequest. +func EnableDumpEachRequest() *Client { + return defaultClient.EnableDumpEachRequest() +} + +// EnableDumpEachRequestWithoutBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutBody. +func EnableDumpEachRequestWithoutBody() *Client { + return defaultClient.EnableDumpEachRequestWithoutBody() +} + +// EnableDumpEachRequestWithoutHeader is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutHeader. +func EnableDumpEachRequestWithoutHeader() *Client { + return defaultClient.EnableDumpEachRequestWithoutHeader() +} + +// EnableDumpEachRequestWithoutResponse is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutResponse. 
+func EnableDumpEachRequestWithoutResponse() *Client { + return defaultClient.EnableDumpEachRequestWithoutResponse() +} + +// EnableDumpEachRequestWithoutRequest is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutRequest. +func EnableDumpEachRequestWithoutRequest() *Client { + return defaultClient.EnableDumpEachRequestWithoutRequest() +} + +// EnableDumpEachRequestWithoutResponseBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutResponseBody. +func EnableDumpEachRequestWithoutResponseBody() *Client { + return defaultClient.EnableDumpEachRequestWithoutResponseBody() +} + +// EnableDumpEachRequestWithoutRequestBody is a global wrapper methods which delegated +// to the default client's Client.EnableDumpEachRequestWithoutRequestBody. +func EnableDumpEachRequestWithoutRequestBody() *Client { + return defaultClient.EnableDumpEachRequestWithoutRequestBody() +} + +// DisableAutoReadResponse is a global wrapper methods which delegated +// to the default client's Client.DisableAutoReadResponse. +func DisableAutoReadResponse() *Client { + return defaultClient.DisableAutoReadResponse() +} + +// EnableAutoReadResponse is a global wrapper methods which delegated +// to the default client's Client.EnableAutoReadResponse. +func EnableAutoReadResponse() *Client { + return defaultClient.EnableAutoReadResponse() +} + +// SetAutoDecodeContentType is a global wrapper methods which delegated +// to the default client's Client.SetAutoDecodeContentType. +func SetAutoDecodeContentType(contentTypes ...string) *Client { + return defaultClient.SetAutoDecodeContentType(contentTypes...) +} + +// SetAutoDecodeContentTypeFunc is a global wrapper methods which delegated +// to the default client's Client.SetAutoDecodeAllTypeFunc. +func SetAutoDecodeContentTypeFunc(fn func(contentType string) bool) *Client { + return defaultClient.SetAutoDecodeContentTypeFunc(fn) +} + +// SetAutoDecodeAllContentType is a global wrapper methods which delegated +// to the default client's Client.SetAutoDecodeAllContentType. +func SetAutoDecodeAllContentType() *Client { + return defaultClient.SetAutoDecodeAllContentType() +} + +// DisableAutoDecode is a global wrapper methods which delegated +// to the default client's Client.DisableAutoDecode. +func DisableAutoDecode() *Client { + return defaultClient.DisableAutoDecode() +} + +// EnableAutoDecode is a global wrapper methods which delegated +// to the default client's Client.EnableAutoDecode. +func EnableAutoDecode() *Client { + return defaultClient.EnableAutoDecode() +} + +// SetUserAgent is a global wrapper methods which delegated +// to the default client's Client.SetUserAgent. +func SetUserAgent(userAgent string) *Client { + return defaultClient.SetUserAgent(userAgent) +} + +// SetCommonBearerAuthToken is a global wrapper methods which delegated +// to the default client's Client.SetCommonBearerAuthToken. +func SetCommonBearerAuthToken(token string) *Client { + return defaultClient.SetCommonBearerAuthToken(token) +} + +// SetCommonBasicAuth is a global wrapper methods which delegated +// to the default client's Client.SetCommonBasicAuth. +func SetCommonBasicAuth(username, password string) *Client { + return defaultClient.SetCommonBasicAuth(username, password) +} + +// SetCommonDigestAuth is a global wrapper methods which delegated +// to the default client's Client.SetCommonDigestAuth. 
+func SetCommonDigestAuth(username, password string) *Client { + return defaultClient.SetCommonDigestAuth(username, password) +} + +// SetCommonHeaders is a global wrapper methods which delegated +// to the default client's Client.SetCommonHeaders. +func SetCommonHeaders(hdrs map[string]string) *Client { + return defaultClient.SetCommonHeaders(hdrs) +} + +// SetCommonHeader is a global wrapper methods which delegated +// to the default client's Client.SetCommonHeader. +func SetCommonHeader(key, value string) *Client { + return defaultClient.SetCommonHeader(key, value) +} + +// SetCommonHeaderOrder is a global wrapper methods which delegated +// to the default client's Client.SetCommonHeaderOrder. +func SetCommonHeaderOrder(keys ...string) *Client { + return defaultClient.SetCommonHeaderOrder(keys...) +} + +// SetCommonPseudoHeaderOder is a global wrapper methods which delegated +// to the default client's Client.SetCommonPseudoHeaderOder. +func SetCommonPseudoHeaderOder(keys ...string) *Client { + return defaultClient.SetCommonPseudoHeaderOder(keys...) +} + +// SetHTTP2SettingsFrame is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2SettingsFrame. +func SetHTTP2SettingsFrame(settings ...http2.Setting) *Client { + return defaultClient.SetHTTP2SettingsFrame(settings...) +} + +// SetHTTP2ConnectionFlow is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2ConnectionFlow. +func SetHTTP2ConnectionFlow(flow uint32) *Client { + return defaultClient.SetHTTP2ConnectionFlow(flow) +} + +// SetHTTP2HeaderPriority is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2HeaderPriority. +func SetHTTP2HeaderPriority(priority http2.PriorityParam) *Client { + return defaultClient.SetHTTP2HeaderPriority(priority) +} + +// SetHTTP2PriorityFrames is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2PriorityFrames. +func SetHTTP2PriorityFrames(frames ...http2.PriorityFrame) *Client { + return defaultClient.SetHTTP2PriorityFrames(frames...) +} + +// SetHTTP2MaxHeaderListSize is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2MaxHeaderListSize. +func SetHTTP2MaxHeaderListSize(max uint32) *Client { + return defaultClient.SetHTTP2MaxHeaderListSize(max) +} + +// SetHTTP2StrictMaxConcurrentStreams is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2StrictMaxConcurrentStreams. +func SetHTTP2StrictMaxConcurrentStreams(strict bool) *Client { + return defaultClient.SetHTTP2StrictMaxConcurrentStreams(strict) +} + +// SetHTTP2ReadIdleTimeout is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2ReadIdleTimeout. +func SetHTTP2ReadIdleTimeout(timeout time.Duration) *Client { + return defaultClient.SetHTTP2ReadIdleTimeout(timeout) +} + +// SetHTTP2PingTimeout is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2PingTimeout. +func SetHTTP2PingTimeout(timeout time.Duration) *Client { + return defaultClient.SetHTTP2PingTimeout(timeout) +} + +// SetHTTP2WriteByteTimeout is a global wrapper methods which delegated +// to the default client's Client.SetHTTP2WriteByteTimeout. +func SetHTTP2WriteByteTimeout(timeout time.Duration) *Client { + return defaultClient.SetHTTP2WriteByteTimeout(timeout) +} + +// ImpersonateChrome is a global wrapper methods which delegated +// to the default client's Client.ImpersonateChrome. 
+func ImpersonateChrome() *Client {
+ return defaultClient.ImpersonateChrome()
+}
+
+// ImpersonateFirefox is a global wrapper methods which delegated
+// to the default client's Client.ImpersonateFirefox.
+func ImpersonateFirefox() *Client {
+ return defaultClient.ImpersonateFirefox()
+}
+
+// ImpersonateSafari is a global wrapper methods which delegated
+// to the default client's Client.ImpersonateSafari.
+func ImpersonateSafari() *Client {
+ return defaultClient.ImpersonateSafari()
+}
+
+// SetCommonContentType is a global wrapper methods which delegated
+// to the default client's Client.SetCommonContentType.
+func SetCommonContentType(ct string) *Client {
+ return defaultClient.SetCommonContentType(ct)
+}
+
+// DisableDumpAll is a global wrapper methods which delegated
+// to the default client's Client.DisableDumpAll.
+func DisableDumpAll() *Client {
+ return defaultClient.DisableDumpAll()
+}
+
+// SetCommonDumpOptions is a global wrapper methods which delegated
+// to the default client's Client.SetCommonDumpOptions.
+func SetCommonDumpOptions(opt *DumpOptions) *Client {
+ return defaultClient.SetCommonDumpOptions(opt)
+}
+
+// SetProxy is a global wrapper methods which delegated
+// to the default client's Client.SetProxy.
+func SetProxy(proxy func(*http.Request) (*url.URL, error)) *Client {
+ return defaultClient.SetProxy(proxy)
+}
+
+// OnBeforeRequest is a global wrapper methods which delegated
+// to the default client's Client.OnBeforeRequest.
+func OnBeforeRequest(m RequestMiddleware) *Client {
+ return defaultClient.OnBeforeRequest(m)
+}
+
+// OnAfterResponse is a global wrapper methods which delegated
+// to the default client's Client.OnAfterResponse.
+func OnAfterResponse(m ResponseMiddleware) *Client {
+ return defaultClient.OnAfterResponse(m)
+}
+
+// SetProxyURL is a global wrapper methods which delegated
+// to the default client's Client.SetProxyURL.
+func SetProxyURL(proxyUrl string) *Client {
+ return defaultClient.SetProxyURL(proxyUrl)
+}
+
+// DisableTraceAll is a global wrapper methods which delegated
+// to the default client's Client.DisableTraceAll.
+func DisableTraceAll() *Client {
+ return defaultClient.DisableTraceAll()
+}
+
+// EnableTraceAll is a global wrapper methods which delegated
+// to the default client's Client.EnableTraceAll.
+func EnableTraceAll() *Client {
+ return defaultClient.EnableTraceAll()
+}
+
+// SetCookieJar is a global wrapper methods which delegated
+// to the default client's Client.SetCookieJar.
+func SetCookieJar(jar http.CookieJar) *Client {
+ return defaultClient.SetCookieJar(jar)
+}
+
+// GetCookies is a global wrapper methods which delegated
+// to the default client's Client.GetCookies.
+func GetCookies(url string) ([]*http.Cookie, error) {
+ return defaultClient.GetCookies(url)
+}
+
+// ClearCookies is a global wrapper methods which delegated
+// to the default client's Client.ClearCookies.
+func ClearCookies() *Client {
+ return defaultClient.ClearCookies()
+}
+
+// SetJsonMarshal is a global wrapper methods which delegated
+// to the default client's Client.SetJsonMarshal.
+func SetJsonMarshal(fn func(v interface{}) ([]byte, error)) *Client {
+ return defaultClient.SetJsonMarshal(fn)
+}
+
+// SetJsonUnmarshal is a global wrapper methods which delegated
+// to the default client's Client.SetJsonUnmarshal.
+func SetJsonUnmarshal(fn func(data []byte, v interface{}) error) *Client { + return defaultClient.SetJsonUnmarshal(fn) +} + +// SetXmlMarshal is a global wrapper methods which delegated +// to the default client's Client.SetXmlMarshal. +func SetXmlMarshal(fn func(v interface{}) ([]byte, error)) *Client { + return defaultClient.SetXmlMarshal(fn) +} + +// SetXmlUnmarshal is a global wrapper methods which delegated +// to the default client's Client.SetXmlUnmarshal. +func SetXmlUnmarshal(fn func(data []byte, v interface{}) error) *Client { + return defaultClient.SetXmlUnmarshal(fn) +} + +// SetDialTLS is a global wrapper methods which delegated +// to the default client's Client.SetDialTLS. +func SetDialTLS(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Client { + return defaultClient.SetDialTLS(fn) +} + +// SetDial is a global wrapper methods which delegated +// to the default client's Client.SetDial. +func SetDial(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Client { + return defaultClient.SetDial(fn) +} + +// SetTLSHandshakeTimeout is a global wrapper methods which delegated +// to the default client's Client.SetTLSHandshakeTimeout. +func SetTLSHandshakeTimeout(timeout time.Duration) *Client { + return defaultClient.SetTLSHandshakeTimeout(timeout) +} + +// EnableForceHTTP1 is a global wrapper methods which delegated +// to the default client's Client.EnableForceHTTP1. +func EnableForceHTTP1() *Client { + return defaultClient.EnableForceHTTP1() +} + +// EnableForceHTTP2 is a global wrapper methods which delegated +// to the default client's Client.EnableForceHTTP2. +func EnableForceHTTP2() *Client { + return defaultClient.EnableForceHTTP2() +} + +// EnableForceHTTP3 is a global wrapper methods which delegated +// to the default client's Client.EnableForceHTTP3. +func EnableForceHTTP3() *Client { + return defaultClient.EnableForceHTTP3() +} + +// EnableHTTP3 is a global wrapper methods which delegated +// to the default client's Client.EnableHTTP3. +func EnableHTTP3() *Client { + return defaultClient.EnableHTTP3() +} + +// DisableForceHttpVersion is a global wrapper methods which delegated +// to the default client's Client.DisableForceHttpVersion. +func DisableForceHttpVersion() *Client { + return defaultClient.DisableForceHttpVersion() +} + +// EnableH2C is a global wrapper methods which delegated +// to the default client's Client.EnableH2C. +func EnableH2C() *Client { + return defaultClient.EnableH2C() +} + +// DisableH2C is a global wrapper methods which delegated +// to the default client's Client.DisableH2C. +func DisableH2C() *Client { + return defaultClient.DisableH2C() +} + +// DisableAllowGetMethodPayload is a global wrapper methods which delegated +// to the default client's Client.DisableAllowGetMethodPayload. +func DisableAllowGetMethodPayload() *Client { + return defaultClient.DisableAllowGetMethodPayload() +} + +// EnableAllowGetMethodPayload is a global wrapper methods which delegated +// to the default client's Client.EnableAllowGetMethodPayload. +func EnableAllowGetMethodPayload() *Client { + return defaultClient.EnableAllowGetMethodPayload() +} + +// SetCommonRetryCount is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryCount. +func SetCommonRetryCount(count int) *Client { + return defaultClient.SetCommonRetryCount(count) +} + +// SetCommonRetryInterval is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryInterval. 
+func SetCommonRetryInterval(getRetryIntervalFunc GetRetryIntervalFunc) *Client { + return defaultClient.SetCommonRetryInterval(getRetryIntervalFunc) +} + +// SetCommonRetryFixedInterval is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryFixedInterval. +func SetCommonRetryFixedInterval(interval time.Duration) *Client { + return defaultClient.SetCommonRetryFixedInterval(interval) +} + +// SetCommonRetryBackoffInterval is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryBackoffInterval. +func SetCommonRetryBackoffInterval(min, max time.Duration) *Client { + return defaultClient.SetCommonRetryBackoffInterval(min, max) +} + +// SetCommonRetryHook is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryHook. +func SetCommonRetryHook(hook RetryHookFunc) *Client { + return defaultClient.SetCommonRetryHook(hook) +} + +// AddCommonRetryHook is a global wrapper methods which delegated +// to the default client's Client.AddCommonRetryHook. +func AddCommonRetryHook(hook RetryHookFunc) *Client { + return defaultClient.AddCommonRetryHook(hook) +} + +// SetCommonRetryCondition is a global wrapper methods which delegated +// to the default client's Client.SetCommonRetryCondition. +func SetCommonRetryCondition(condition RetryConditionFunc) *Client { + return defaultClient.SetCommonRetryCondition(condition) +} + +// AddCommonRetryCondition is a global wrapper methods which delegated +// to the default client's Client.AddCommonRetryCondition. +func AddCommonRetryCondition(condition RetryConditionFunc) *Client { + return defaultClient.AddCommonRetryCondition(condition) +} + +// SetResponseBodyTransformer is a global wrapper methods which delegated +// to the default client's Client.SetResponseBodyTransformer. +func SetResponseBodyTransformer(fn func(rawBody []byte, req *Request, resp *Response) (transformedBody []byte, err error)) *Client { + return defaultClient.SetResponseBodyTransformer(fn) +} + +// SetUnixSocket is a global wrapper methods which delegated +// to the default client's Client.SetUnixSocket. +func SetUnixSocket(file string) *Client { + return defaultClient.SetUnixSocket(file) +} + +// SetTLSFingerprint is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprint. +func SetTLSFingerprint(clientHelloID utls.ClientHelloID) *Client { + return defaultClient.SetTLSFingerprint(clientHelloID) +} + +// SetTLSFingerprintRandomized is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintRandomized. +func SetTLSFingerprintRandomized() *Client { + return defaultClient.SetTLSFingerprintRandomized() +} + +// SetTLSFingerprintChrome is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintChrome. +func SetTLSFingerprintChrome() *Client { + return defaultClient.SetTLSFingerprintChrome() +} + +// SetTLSFingerprintAndroid is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintAndroid. +func SetTLSFingerprintAndroid() *Client { + return defaultClient.SetTLSFingerprintAndroid() +} + +// SetTLSFingerprint360 is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprint360. +func SetTLSFingerprint360() *Client { + return defaultClient.SetTLSFingerprint360() +} + +// SetTLSFingerprintEdge is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintEdge. 
+func SetTLSFingerprintEdge() *Client { + return defaultClient.SetTLSFingerprintEdge() +} + +// SetTLSFingerprintFirefox is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintFirefox. +func SetTLSFingerprintFirefox() *Client { + return defaultClient.SetTLSFingerprintFirefox() +} + +// SetTLSFingerprintQQ is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintQQ. +func SetTLSFingerprintQQ() *Client { + return defaultClient.SetTLSFingerprintQQ() +} + +// SetTLSFingerprintIOS is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintIOS. +func SetTLSFingerprintIOS() *Client { + return defaultClient.SetTLSFingerprintIOS() +} + +// SetTLSFingerprintSafari is a global wrapper methods which delegated +// to the default client's Client.SetTLSFingerprintSafari. +func SetTLSFingerprintSafari() *Client { + return defaultClient.SetTLSFingerprintSafari() +} + +// GetClient is a global wrapper methods which delegated +// to the default client's Client.GetClient. +func GetClient() *http.Client { + return defaultClient.GetClient() +} + +// NewRequest is a global wrapper methods which delegated +// to the default client's Client.NewRequest. +func NewRequest() *Request { + return defaultClient.R() +} + +// R is a global wrapper methods which delegated +// to the default client's Client.R(). +func R() *Request { + return defaultClient.R() +} diff --git a/decode.go b/decode.go new file mode 100644 index 00000000..825cffc6 --- /dev/null +++ b/decode.go @@ -0,0 +1,112 @@ +package req + +import ( + "github.com/imroc/req/v3/internal/charsets" + "io" + "strings" +) + +var textContentTypes = []string{"text", "json", "xml", "html", "java"} + +var autoDecodeText = autoDecodeContentTypeFunc(textContentTypes...) 
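The decode.go logic added below performs transparent charset detection: `autoDecodeContentTypeFunc` builds the predicate that decides from the response `Content-Type` whether a body should be converted to UTF-8, and `autoDecodeReadCloser` sniffs the charset on the first `Read` and swaps in a decoding reader when one is found. A minimal caller-side sketch of how this is typically switched on (the URL is hypothetical; the `SetAutoDecode*` methods are the same ones exercised in client_test.go above):

``` go
package main

import (
	"fmt"
	"strings"

	"github.com/imroc/req/v3"
)

func main() {
	client := req.C().
		// Decode only bodies whose Content-Type contains "html" or "text";
		// everything else is passed through untouched.
		SetAutoDecodeContentType("html", "text")

	// Hypothetical endpoint that serves GBK-encoded HTML.
	resp, err := client.R().Get("http://example.com/gbk-page")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.String()) // printed as UTF-8 when the charset is detected

	// Equivalent predicate form, mirroring autoDecodeContentTypeFunc:
	client.SetAutoDecodeContentTypeFunc(func(contentType string) bool {
		return strings.Contains(contentType, "text") || strings.Contains(contentType, "html")
	})
}
```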
+ +func autoDecodeContentTypeFunc(contentTypes ...string) func(contentType string) bool { + return func(contentType string) bool { + for _, ct := range contentTypes { + if strings.Contains(contentType, ct) { + return true + } + } + return false + } +} + +type decodeReaderCloser struct { + io.ReadCloser + decodeReader io.Reader +} + +func (d *decodeReaderCloser) Read(p []byte) (n int, err error) { + return d.decodeReader.Read(p) +} + +func newAutoDecodeReadCloser(input io.ReadCloser, t *Transport) *autoDecodeReadCloser { + return &autoDecodeReadCloser{ReadCloser: input, t: t} +} + +type autoDecodeReadCloser struct { + io.ReadCloser + t *Transport + decodeReader io.Reader + detected bool + peek []byte +} + +func (a *autoDecodeReadCloser) peekRead(p []byte) (n int, err error) { + n, err = a.ReadCloser.Read(p) + if n == 0 || (err != nil && err != io.EOF) { + return + } + a.detected = true + enc, name := charsets.FindEncoding(p) + if enc == nil { + return + } + if a.t.Debugf != nil { + a.t.Debugf("charset %s found in body's meta, auto-decode to utf-8", name) + } + dc := enc.NewDecoder() + a.decodeReader = dc.Reader(a.ReadCloser) + var pp []byte + pp, err = dc.Bytes(p[:n]) + if err != nil { + return + } + if len(pp) > len(p) { + a.peek = make([]byte, len(pp)-len(p)) + copy(a.peek, pp[len(p):]) + copy(p, pp[:len(p)]) + n = len(p) + return + } + copy(p, pp) + n = len(p) + return +} + +func (a *autoDecodeReadCloser) peekDrain(p []byte) (n int, err error) { + if len(a.peek) > len(p) { + copy(p, a.peek[:len(p)]) + peek := make([]byte, len(a.peek)-len(p)) + copy(peek, a.peek[len(p):]) + a.peek = peek + n = len(p) + return + } + if len(a.peek) == len(p) { + copy(p, a.peek) + n = len(p) + a.peek = nil + return + } + pp := make([]byte, len(p)-len(a.peek)) + nn, err := a.decodeReader.Read(pp) + n = len(a.peek) + nn + copy(p[:len(a.peek)], a.peek) + copy(p[len(a.peek):], pp[:nn]) + a.peek = nil + return +} + +func (a *autoDecodeReadCloser) Read(p []byte) (n int, err error) { + if !a.detected { + return a.peekRead(p) + } + if a.peek != nil { + return a.peekDrain(p) + } + if a.decodeReader != nil { + return a.decodeReader.Read(p) + } + return a.ReadCloser.Read(p) // can not determine charset, not decode +} diff --git a/decode_test.go b/decode_test.go new file mode 100644 index 00000000..e65a8ea6 --- /dev/null +++ b/decode_test.go @@ -0,0 +1,17 @@ +package req + +import ( + "github.com/imroc/req/v3/internal/tests" + "testing" +) + +func TestPeekDrain(t *testing.T) { + a := autoDecodeReadCloser{peek: []byte("test")} + p := make([]byte, 2) + n, _ := a.peekDrain(p) + tests.AssertEqual(t, 2, n) + tests.AssertEqual(t, true, a.peek != nil) + n, _ = a.peekDrain(p) + tests.AssertEqual(t, 2, n) + tests.AssertEqual(t, true, a.peek == nil) +} diff --git a/digest.go b/digest.go new file mode 100644 index 00000000..442b5953 --- /dev/null +++ b/digest.go @@ -0,0 +1,278 @@ +package req + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "errors" + "fmt" + "hash" + "io" + "net/http" + "strings" + + "github.com/imroc/req/v3/internal/header" +) + +var ( + errDigestBadChallenge = errors.New("digest: challenge is bad") + errDigestCharset = errors.New("digest: unsupported charset") + errDigestAlgNotSupported = errors.New("digest: algorithm is not supported") + errDigestQopNotSupported = errors.New("digest: no supported qop in list") +) + +var hashFuncs = map[string]func() hash.Hash{ + "": md5.New, + "MD5": md5.New, + "MD5-sess": md5.New, + "SHA-256": sha256.New, + "SHA-256-sess": sha256.New, + 
"SHA-512-256": sha512.New, + "SHA-512-256-sess": sha512.New, +} + +// create response middleware for http digest authentication. +func handleDigestAuthFunc(username, password string) ResponseMiddleware { + return func(client *Client, resp *Response) error { + if resp.Err != nil || resp.StatusCode != http.StatusUnauthorized { + return nil + } + auth, err := createDigestAuth(resp.Response, username, password) + if err != nil { + return err + } + r := resp.Request + req := *r.RawRequest + if req.Body != nil { + err = parseRequestBody(client, r) // re-setup body + if err != nil { + return err + } + if r.GetBody != nil { + body, err := r.GetBody() + if err != nil { + return err + } + req.Body = body + req.GetBody = r.GetBody + } + } + if req.Header == nil { + req.Header = make(http.Header) + } + req.Header.Set(header.Authorization, auth) + resp.Response, err = client.GetTransport().RoundTrip(&req) + return err + } +} + +func createDigestAuth(resp *http.Response, username, password string) (auth string, err error) { + chal := resp.Header.Get(header.WwwAuthenticate) + if chal == "" { + return "", errDigestBadChallenge + } + + c, err := parseChallenge(chal) + if err != nil { + return "", err + } + + // Form credentials based on the challenge + cr := newCredentials(resp.Request.URL.RequestURI(), resp.Request.Method, username, password, c) + auth, err = cr.authorize() + return +} + +func newCredentials(digestURI, method, username, password string, c *challenge) *credentials { + return &credentials{ + username: username, + userhash: c.userhash, + realm: c.realm, + nonce: c.nonce, + digestURI: digestURI, + algorithm: c.algorithm, + sessionAlg: strings.HasSuffix(c.algorithm, "-sess"), + opaque: c.opaque, + messageQop: c.qop, + nc: 0, + method: method, + password: password, + } +} + +type challenge struct { + realm string + domain string + nonce string + opaque string + stale string + algorithm string + qop string + userhash string +} + +func parseChallenge(input string) (*challenge, error) { + const ws = " \n\r\t" + const qs = `"` + s := strings.Trim(input, ws) + if !strings.HasPrefix(s, "Digest ") { + return nil, errDigestBadChallenge + } + s = strings.Trim(s[7:], ws) + sl := strings.Split(s, ",") + c := &challenge{} + var r []string + for i := range sl { + r = strings.SplitN(strings.TrimSpace(sl[i]), "=", 2) + if len(r) != 2 { + return nil, errDigestBadChallenge + } + switch r[0] { + case "realm": + c.realm = strings.Trim(r[1], qs) + case "domain": + c.domain = strings.Trim(r[1], qs) + case "nonce": + c.nonce = strings.Trim(r[1], qs) + case "opaque": + c.opaque = strings.Trim(r[1], qs) + case "stale": + c.stale = strings.Trim(r[1], qs) + case "algorithm": + c.algorithm = strings.Trim(r[1], qs) + case "qop": + c.qop = strings.Trim(r[1], qs) + case "charset": + if strings.ToUpper(strings.Trim(r[1], qs)) != "UTF-8" { + return nil, errDigestCharset + } + case "userhash": + c.userhash = strings.Trim(r[1], qs) + default: + return nil, errDigestBadChallenge + } + } + return c, nil +} + +type credentials struct { + username string + userhash string + realm string + nonce string + digestURI string + algorithm string + sessionAlg bool + cNonce string + opaque string + messageQop string + nc int + method string + password string +} + +func (c *credentials) authorize() (string, error) { + if _, ok := hashFuncs[c.algorithm]; !ok { + return "", errDigestAlgNotSupported + } + + if err := c.validateQop(); err != nil { + return "", err + } + + resp, err := c.resp() + if err != nil { + return "", err + } + + sl := 
make([]string, 0, 10) + if c.userhash == "true" { + // RFC 7616 3.4.4 + c.username = c.h(fmt.Sprintf("%s:%s", c.username, c.realm)) + sl = append(sl, fmt.Sprintf(`userhash=%s`, c.userhash)) + } + sl = append(sl, fmt.Sprintf(`username="%s"`, c.username)) + sl = append(sl, fmt.Sprintf(`realm="%s"`, c.realm)) + sl = append(sl, fmt.Sprintf(`nonce="%s"`, c.nonce)) + sl = append(sl, fmt.Sprintf(`uri="%s"`, c.digestURI)) + sl = append(sl, fmt.Sprintf(`response="%s"`, resp)) + sl = append(sl, fmt.Sprintf(`algorithm=%s`, c.algorithm)) + if c.opaque != "" { + sl = append(sl, fmt.Sprintf(`opaque="%s"`, c.opaque)) + } + if c.messageQop != "" { + sl = append(sl, fmt.Sprintf("qop=%s", c.messageQop)) + sl = append(sl, fmt.Sprintf("nc=%08x", c.nc)) + sl = append(sl, fmt.Sprintf(`cnonce="%s"`, c.cNonce)) + } + + return fmt.Sprintf("Digest %s", strings.Join(sl, ", ")), nil +} + +func (c *credentials) validateQop() error { + // Currently only supporting auth quality of protection. TODO: add auth-int support + if c.messageQop == "" { + return nil + } + possibleQops := strings.Split(c.messageQop, ", ") + var authSupport bool + for _, qop := range possibleQops { + if qop == "auth" { + authSupport = true + break + } + } + if !authSupport { + return errDigestQopNotSupported + } + + return nil +} + +func (c *credentials) h(data string) string { + hfCtor := hashFuncs[c.algorithm] + hf := hfCtor() + _, _ = hf.Write([]byte(data)) // Hash.Write never returns an error + return fmt.Sprintf("%x", hf.Sum(nil)) +} + +func (c *credentials) resp() (string, error) { + c.nc++ + + b := make([]byte, 16) + _, err := io.ReadFull(rand.Reader, b) + if err != nil { + return "", err + } + c.cNonce = fmt.Sprintf("%x", b)[:32] + + ha1 := c.ha1() + ha2 := c.ha2() + + if len(c.messageQop) == 0 { + return c.h(fmt.Sprintf("%s:%s:%s", ha1, c.nonce, ha2)), nil + } + return c.kd(ha1, fmt.Sprintf("%s:%08x:%s:%s:%s", + c.nonce, c.nc, c.cNonce, c.messageQop, ha2)), nil +} + +func (c *credentials) kd(secret, data string) string { + return c.h(fmt.Sprintf("%s:%s", secret, data)) +} + +// RFC 7616 3.4.2 +func (c *credentials) ha1() string { + ret := c.h(fmt.Sprintf("%s:%s:%s", c.username, c.realm, c.password)) + if c.sessionAlg { + return c.h(fmt.Sprintf("%s:%s:%s", ret, c.nonce, c.cNonce)) + } + + return ret +} + +// RFC 7616 3.4.3 +func (c *credentials) ha2() string { + // currently no auth-int support + return c.h(fmt.Sprintf("%s:%s", c.method, c.digestURI)) +} diff --git a/doc/README_cn.md b/doc/README_cn.md deleted file mode 100644 index d5405bb2..00000000 --- a/doc/README_cn.md +++ /dev/null @@ -1,295 +0,0 @@ -# req -[![GoDoc](https://godoc.org/github.com/imroc/req?status.svg)](https://godoc.org/github.com/imroc/req) - -Go璇█浜烘у寲HTTP璇锋眰搴 - - -鐗规 -======== - -- 杞婚噺绾 -- 绠鍗 -- 瀹规槗鎿嶄綔JSON鍜孹ML -- 瀹规槗璋冭瘯鍜屾棩蹇楄褰 -- 瀹规槗涓婁紶鍜屼笅杞芥枃浠 -- 瀹规槗绠$悊Cookie -- 瀹规槗璁剧疆浠g悊 -- 瀹规槗璁剧疆瓒呮椂 -- 瀹规槗鑷畾涔塇TTP瀹㈡埛绔 - -瀹夎 -======= -``` sh -go get github.com/imroc/req -``` - -姒傝 -======= -`req` 鍩轰簬鏍囧噯搴 `net/http` 瀹炵幇浜嗕竴涓弸濂界殑API. 
- -`Req` 鍜 `Resp` 鏄袱涓渶閲嶈鐨勭粨鏋勪綋, 浣犲彲浠ユ妸 `Req` 鐪嬩綔瀹㈡埛绔紝 鎶奰Resp` 鐪嬩綔瀛樻斁璇锋眰鍙婂叾鍝嶅簲鐨勫鍣紝瀹冧滑閮芥彁渚涜澶氱畝娲佹柟渚跨殑API锛岃浣犲彲浠ュ緢杞绘澗鍋氬緢澶氬緢澶氫簨鎯呫 -``` go -func (r *Req) Post(url string, v ...interface{}) (*Resp, error) -``` - -澶у鎯呭喌涓嬶紝鍙戣捣璇锋眰鍙湁url鏄繀閫夊弬鏁帮紝鍏跺畠閮藉彲閫夛紝姣斿璇锋眰澶淬佽姹傚弬鏁般佹枃浠舵垨璇锋眰浣撶瓑銆 - -鍖呬腑鍚竴涓粯璁ょ殑 `Req` 瀵硅薄, 瀹冩墍鏈夌殑鍏湁鏂规硶閮借`req`鍖呭搴旂殑鍏湁鏂规硶鍖呰浜嗭紝鎵浠ュぇ澶氭暟鎯呭喌涓嬶紝浣犵洿鎺ュ彲浠ユ妸`req`鍖呯湅浣滀竴涓猔Req`瀵硅薄鏉ヤ娇鐢ㄣ -``` go -// 鍒涘缓Req瀵硅薄鏉ュ彂璧疯姹 -r := req.New() -r.Get(url) - -// 鐩存帴浣跨敤req鍖呭彂璧疯姹 -req.Get(url) -``` -浣犲彲浠ヤ娇鐢 `req.New()` 鏂规硶鏉ュ垱寤 `*Req` 浣滀负涓涓崟鐙殑瀹㈡埛绔 - -渚嬪瓙 -======= -[鍩虹鐢ㄦ硶](#Basic) -[璁剧疆璇锋眰澶碷(#Set-Header) -[璁剧疆璇锋眰鍙傛暟](#Set-Param) -[璁剧疆璇锋眰浣揮(#Set-Body) -[璋冭瘯](#Debug) -[杈撳嚭鏍煎紡](#Format) -[ToJSON & ToXML](#ToJSON-ToXML) -[鑾峰彇 *http.Response](#Response) -[涓婁紶](#Upload) -[涓嬭浇](#Download) -[Cookie](#Cookie) -[璁剧疆瓒呮椂](#Set-Timeout) -[璁剧疆浠g悊](#Set-Proxy) -[鑷畾涔 http.Client](#Customize-Client) - -## 鍩虹鐢ㄦ硶 -``` go -header := req.Header{ - "Accept": "application/json", - "Authorization": "Basic YWRtaW46YWRtaW4=", -} -param := req.Param{ - "name": "imroc", - "cmd": "add", -} -// 鍙湁url蹇呴夛紝鍏跺畠鍙傛暟閮芥槸鍙 -r, err = req.Post("http://foo.bar/api", header, param) -if err != nil { - log.Fatal(err) -} -r.ToJSON(&foo) // 鍝嶅簲浣撹浆鎴愬璞 -log.Printf("%+v", r) // 鎵撳嵃璇︾粏淇℃伅 -``` - -## 璁剧疆璇锋眰澶 -浣跨敤 `req.Header` (瀹冨疄闄呬笂鏄竴涓 `map[string]string`) -``` go -authHeader := req.Header{ - "Accept": "application/json", - "Authorization": "Basic YWRtaW46YWRtaW4=", -} -req.Get("https://www.baidu.com", authHeader, req.Header{"User-Agent": "V1.1"}) -``` -浣跨敤 `http.Header` -``` go -header := make(http.Header) -header.Set("Accept", "application/json") -req.Get("https://www.baidu.com", header) -``` - -## 璁剧疆璇锋眰鍙傛暟 -Use `req.Param` (瀹冨疄闄呬笂鏄竴涓 `map[string]interface{}`) -``` go -param := req.Param{ - "id": "imroc", - "pwd": "roc", -} -req.Get("http://foo.bar/api", param) // http://foo.bar/api?id=imroc&pwd=roc -req.Post(url, param) // 璇锋眰浣 => id=imroc&pwd=roc -``` -浣跨敤 `req.QueryParam` 寮哄埗灏嗚姹傚弬鏁版嫾鍦╱rl鍚庨潰 (瀹冨疄闄呬笂涔熸槸涓涓 `map[string]interface{}`) -``` go -req.Post("http://foo.bar/api", req.Param{"name": "roc", "age": "22"}, req.QueryParam{"access_token": "fedledGF9Hg9ehTU"}) -/* -POST /api?access_token=fedledGF9Hg9ehTU HTTP/1.1 -Host: foo.bar -User-Agent: Go-http-client/1.1 -Content-Length: 15 -Content-Type: application/x-www-form-urlencoded;charset=UTF-8 -Accept-Encoding: gzip - -age=22&name=roc -*/ -``` - -## 璁剧疆璇锋眰浣 -Put `string`, `[]byte` and `io.Reader` as body directly. 
-``` go -req.Post(url, "id=roc&cmd=query") -``` -灏嗗璞′綔涓篔SON鎴朮ML璇锋眰浣擄紙鑷姩娣诲姞 `Content-Type` 璇锋眰澶达級 -``` go -req.Post(url, req.BodyJSON(&foo)) -req.Post(url, req.BodyXML(&bar)) -``` - -## 璋冭瘯 -灏嗗叏灞鍙橀噺 `req.Debug` 璁剧疆涓篳true`锛屽皢浼氭妸鎵鏈夎姹傜殑璇︾粏淇℃伅鎵撳嵃鍦ㄦ爣鍑嗚緭鍑恒 -``` go -req.Debug = true -req.Post("http://localhost/test" "hi") -``` -![post](post.png) - -## 杈撳嚭鏍煎紡 -鎮ㄥ彲浠ヤ娇鐢ㄦ寚瀹氱被鍨嬬殑杈撳嚭鏍煎紡鍦ㄦ棩蹇楁枃浠朵腑璁板綍璇锋眰鍜屽搷搴旂殑淇℃伅銆備緥濡傦紝鍦ㄥ紑鍙戦樁娈典娇鐢╜锛+v`鏍煎紡锛屽彲浠ヨ浣犺瀵熻姹傚拰鍝嶅簲鐨勭粏鑺備俊鎭 鍦ㄧ敓浜ч樁娈典娇鐢╜锛卾`鎴朻锛-v`杈撳嚭鏍煎紡锛屽彧璁板綍鎵闇瑕佺殑淇℃伅銆 - -### `%+v` 鎴 `%+s` -璇︾粏杈撳嚭 -``` go -r, _ := req.Post(url, header, param) -log.Printf("%+v", r) // 杈撳嚭鏍煎紡鍜孌ebug寮鍚椂鐨勬牸寮忎竴鏍 -``` - -### `%v` 鎴 `%s` -绠鍗曡緭鍑猴紙榛樿鏍煎紡锛 -``` go -r, _ := req.Get(url, param) -log.Printf("%v\n", r) // GET http://foo.bar/api?name=roc&cmd=add {"code":"0","msg":"success"} -log.Prinln(r) // 鍜屼笂闈竴鏍 -``` - -### `%-v` 鎴 `%-s` -绠鍗曡緭鍑哄苟淇濇寔鎵鏈夊唴瀹瑰湪涓琛屽唴锛堣姹備綋鎴栧搷搴斾綋鍙兘鍖呭惈澶氳锛岃繖绉嶆牸寮忎細灏嗘墍鏈夋崲琛屻佸洖杞︽浛鎹㈡垚`" "`, 杩欏湪浼氳浣犲湪鏌ユ棩蹇楃殑鏃跺欓潪甯告湁鐢級 - -### Flag -浣犲彲浠ヨ皟鐢 `SetFlags` 鎺у埗杈撳嚭鍐呭锛屽喅瀹氬摢浜涢儴鍒嗚兘澶熻杈撳嚭銆 -``` go -const ( - LreqHead = 1 << iota // 杈撳嚭璇锋眰棣栭儴锛堝寘鍚姹傝鍜岃姹傚ご锛 - LreqBody // 杈撳嚭璇锋眰浣 - LrespHead // 杈撳嚭鍝嶅簲棣栭儴锛堝寘鍚搷搴旇鍜屽搷搴斿ご锛 - LrespBody // 杈撳嚭鍝嶅簲浣 - Lcost // 杈撳嚭璇锋眰鎵娑堣楁帀鏃堕暱 - LstdFlags = LreqHead | LreqBody | LrespHead | LrespBody -) -``` -``` go -req.SetFlags(req.LreqHead | req.LreqBody | req.LrespHead) -``` - -### 鐩戞帶璇锋眰鑰楁椂 -``` go -req.SetFlags(req.LstdFlags | req.Lcost) // 杈撳嚭鏍煎紡鏄剧ず璇锋眰鑰楁椂 -r,_ := req.Get(url) -log.Println(r) // http://foo.bar/api 3.260802ms {"code":0 "msg":"success"} -if r.Cost() > 3 * time.Second { // 妫鏌ヨ楁椂 - log.Println("WARN: slow request:", r) -} -``` - -## ToJSON & ToXML -``` go -r, _ := req.Get(url) -r.ToJSON(&foo) -r, _ = req.Post(url, req.BodyXML(&bar)) -r.ToXML(&baz) -``` - -## 鑾峰彇 *http.Response -```go -// func (r *Req) Response() *http.Response -r, _ := req.Get(url) -resp := r.Response() -fmt.Println(resp.StatusCode) -``` - -## 涓婁紶 -浣跨敤 `req.File` 鍖归厤鏂囦欢 -``` go -req.Post(url, req.File("imroc.png"), req.File("/Users/roc/Pictures/*.png")) -``` -浣跨敤 `req.FileUpload` 缁嗙矑搴︽帶鍒朵笂浼 -``` go -file, _ := os.Open("imroc.png") -req.Post(url, req.FileUpload{ - File: file, - FieldName: "file", // FieldName 鏄〃鍗曞瓧娈靛悕 - FileName: "avatar.png", // Filename 鏄涓婁紶鐨勬枃浠剁殑鍚嶇О锛屾垜浠娇鐢ㄥ畠鏉ョ寽娴媘imetype锛屽苟灏嗗叾涓婁紶鍒版湇鍔″櫒涓 -}) -``` -浣跨敤`req.UploadProgress`鐩戝惉涓婁紶杩涘害 -```go -progress := func(current, total int64) { - fmt.Println(float32(current)/float32(total)*100, "%") -} -req.Post(url, req.File("/Users/roc/Pictures/*.png"), req.UploadProgress(progress)) -fmt.Println("upload complete") -``` - -## 涓嬭浇 -``` go -r, _ := req.Get(url) -r.ToFile("imroc.png") -``` -浣跨敤`req.DownloadProgress`鐩戝惉涓嬭浇杩涘害 -```go -progress := func(current, total int64) { - fmt.Println(float32(current)/float32(total)*100, "%") -} -r, _ := req.Get(url, req.DownloadProgress(progress)) -r.ToFile("hello.mp4") -fmt.Println("download complete") -``` - -## Cookie -榛樿鎯呭喌涓嬶紝搴曞眰鐨 `*http.Client` 浼氳嚜鍔ㄧ鐞嗕綘鐨刢ookie锛堝鏋滄湇鍔″櫒缁欎綘鍙戜簡cookie锛屼箣鍚庣殑璇锋眰瀹冧細鑷姩甯︿笂cookie璇锋眰澶寸粰鏈嶅姟鍣級, 浣犲彲浠ヨ皟鐢ㄨ繖涓柟娉曞彇娑堣嚜鍔ㄧ鐞嗭細 -``` go -req.EnableCookie(false) -``` -浣犺繕鍙互鍦ㄥ彂閫佽姹傜殑鏃跺欒嚜宸变紶鍏 `*http.Cookie` -``` go -cookie := new(http.Cookie) -// ...... 
-req.Get(url, cookie) -``` - -## 璁剧疆瓒呮椂 -``` go -req.SetTimeout(50 * time.Second) -``` - -## 璁剧疆浠g悊 -榛樿鎯呭喌涓嬶紝濡傛灉绯荤粺鐜鍙橀噺鏈 `http_proxy` 鎴 `https_proxy` 锛宺eq浼氳瀵瑰簲鐨勫湴鍧浣滀负瀵瑰簲鍗忚鐨勪唬鐞嗭紝浣犱篃鍙互鑷畾涔夎缃唬鐞嗭紝鎴栬呭皢鍏剁疆涓篳nil`锛屽嵆鍙栨秷浠g悊銆 -``` go -req.SetProxy(func(r *http.Request) (*url.URL, error) { - if strings.Contains(r.URL.Hostname(), "google") { - return url.Parse("http://my.vpn.com:23456") - } - return nil, nil -}) -``` -璁剧疆绠鍗曚唬鐞嗭紙灏嗘墍鏈夎姹傞兘杞彂鍒版寚瀹氫唬鐞唘rl鍦板潃涓婏級 -``` go -req.SetProxyUrl("http://my.proxy.com:23456") -``` - -## 鑷畾涔塇TTP瀹㈡埛绔 -浣跨敤 `SetClient` 鏀瑰彉搴曞眰鐨 `*http.Client` -``` go -req.SetClient(client) -``` -缁欐煇涓姹傚埗瀹氱壒瀹氱殑 `*http.Client` -``` go -client := &http.Client{Timeout: 30 * time.Second} -req.Get(url, client) -``` -鏀瑰彉搴曞眰 `*http.Client` 鐨勬煇浜涘睘鎬 -``` go -req.Client().Jar, _ = cookiejar.New(nil) -trans, _ := req.Client().Transport.(*http.Transport) -trans.MaxIdleConns = 20 -trans.TLSHandshakeTimeout = 20 * time.Second -trans.DisableKeepAlives = true -trans.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} -``` diff --git a/doc/post.png b/doc/post.png deleted file mode 100644 index 934d867b..00000000 Binary files a/doc/post.png and /dev/null differ diff --git a/dump.go b/dump.go index 6c17cdaf..77f96349 100644 --- a/dump.go +++ b/dump.go @@ -1,204 +1,127 @@ package req import ( - "bufio" - "bytes" + "github.com/imroc/req/v3/internal/dump" "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "time" + "os" ) -// Debug enable debug mode if set to true -var Debug bool - -// dumpConn is a net.Conn which writes to Writer and reads from Reader -type dumpConn struct { - io.Writer - io.Reader +// DumpOptions controls the dump behavior. +type DumpOptions struct { + Output io.Writer + RequestOutput io.Writer + ResponseOutput io.Writer + RequestHeaderOutput io.Writer + RequestBodyOutput io.Writer + ResponseHeaderOutput io.Writer + ResponseBodyOutput io.Writer + RequestHeader bool + RequestBody bool + ResponseHeader bool + ResponseBody bool + Async bool } -func (c *dumpConn) Close() error { return nil } -func (c *dumpConn) LocalAddr() net.Addr { return nil } -func (c *dumpConn) RemoteAddr() net.Addr { return nil } -func (c *dumpConn) SetDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } +// Clone return a copy of DumpOptions +func (do *DumpOptions) Clone() *DumpOptions { + if do == nil { + return nil + } + d := *do + return &d +} -// delegateReader is a reader that delegates to another reader, -// once it arrives on a channel. 
-type delegateReader struct { - c chan io.Reader - r io.Reader // nil until received from c +type dumpOptions struct { + *DumpOptions } -func (r *delegateReader) Read(p []byte) (int, error) { - if r.r == nil { - r.r = <-r.c +func (o dumpOptions) Output() io.Writer { + if o.DumpOptions.Output == nil { + return os.Stdout } - return r.r.Read(p) + return o.DumpOptions.Output } -type dummyBody struct { - N int - off int +func (o dumpOptions) RequestHeaderOutput() io.Writer { + if o.DumpOptions.RequestHeaderOutput != nil { + return o.DumpOptions.RequestHeaderOutput + } + if o.DumpOptions.RequestOutput != nil { + return o.DumpOptions.RequestOutput + } + return o.Output() } -func (d *dummyBody) Read(p []byte) (n int, err error) { - if d.N <= 0 { - err = io.EOF - return +func (o dumpOptions) RequestBodyOutput() io.Writer { + if o.DumpOptions.RequestBodyOutput != nil { + return o.DumpOptions.RequestBodyOutput } - left := d.N - d.off - if left <= 0 { - err = io.EOF - return + if o.DumpOptions.RequestOutput != nil { + return o.DumpOptions.RequestOutput } + return o.Output() +} - if l := len(p); l > 0 { - if l >= left { - n = left - err = io.EOF - } else { - n = l - } - d.off += n - for i := 0; i < n; i++ { - p[i] = '*' - } +func (o dumpOptions) ResponseHeaderOutput() io.Writer { + if o.DumpOptions.ResponseHeaderOutput != nil { + return o.DumpOptions.ResponseHeaderOutput } - - return + if o.DumpOptions.ResponseOutput != nil { + return o.DumpOptions.ResponseOutput + } + return o.Output() } -func (d *dummyBody) Close() error { - return nil +func (o dumpOptions) ResponseBodyOutput() io.Writer { + if o.DumpOptions.ResponseBodyOutput != nil { + return o.DumpOptions.ResponseBodyOutput + } + if o.DumpOptions.ResponseOutput != nil { + return o.DumpOptions.ResponseOutput + } + return o.Output() } -type dumpBuffer struct { - bytes.Buffer +func (o dumpOptions) RequestHeader() bool { + return o.DumpOptions.RequestHeader } -func (b *dumpBuffer) Write(p []byte) { - b.Buffer.Write(p) - b.Buffer.WriteString("\r\n\r\n") +func (o dumpOptions) RequestBody() bool { + return o.DumpOptions.RequestBody } -func (b *dumpBuffer) WriteString(s string) { - b.Write([]byte(s)) +func (o dumpOptions) ResponseHeader() bool { + return o.DumpOptions.ResponseHeader } -func (r *Resp) dumpRequest(dump *dumpBuffer) { - head := r.r.flag&LreqHead != 0 - body := r.r.flag&LreqBody != 0 - - if head { - r.dumpReqHead(dump) - } - if body { - if r.multipartHelper != nil { - dump.Write(r.multipartHelper.Dump()) - } else if len(r.reqBody) > 0 { - dump.Write(r.reqBody) - } - } +func (o dumpOptions) ResponseBody() bool { + return o.DumpOptions.ResponseBody } -func (r *Resp) dumpReqHead(dump *dumpBuffer) { - reqSend := new(http.Request) - *reqSend = *r.req - if reqSend.URL.Scheme == "https" { - reqSend.URL = new(url.URL) - *reqSend.URL = *r.req.URL - reqSend.URL.Scheme = "http" - } - - if reqSend.ContentLength > 0 { - reqSend.Body = &dummyBody{N: int(reqSend.ContentLength)} - } else { - reqSend.Body = &dummyBody{N: 1} - } +func (o dumpOptions) Async() bool { + return o.DumpOptions.Async +} - // Use the actual Transport code to record what we would send - // on the wire, but not using TCP. Use a Transport with a - // custom dialer that returns a fake net.Conn that waits - // for the full input (and recording it), and then responds - // with a dummy response. 
- var buf bytes.Buffer // records the output - pr, pw := io.Pipe() - defer pw.Close() - dr := &delegateReader{c: make(chan io.Reader)} - - t := &http.Transport{ - Dial: func(net, addr string) (net.Conn, error) { - return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil - }, - } - defer t.CloseIdleConnections() - - client := new(http.Client) - *client = *r.client - client.Transport = t - - // Wait for the request before replying with a dummy response: - go func() { - req, err := http.ReadRequest(bufio.NewReader(pr)) - if err == nil { - // Ensure all the body is read; otherwise - // we'll get a partial dump. - io.Copy(ioutil.Discard, req.Body) - req.Body.Close() - } - - dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n") - pr.Close() - }() - - _, err := client.Do(reqSend) - if err != nil { - dump.WriteString(err.Error()) - } else { - reqDump := buf.Bytes() - if i := bytes.Index(reqDump, []byte("\r\n\r\n")); i >= 0 { - reqDump = reqDump[:i] - } - dump.Write(reqDump) - } +func (o dumpOptions) Clone() dump.Options { + return dumpOptions{o.DumpOptions.Clone()} } -func (r *Resp) dumpResponse(dump *dumpBuffer) { - head := r.r.flag&LrespHead != 0 - body := r.r.flag&LrespBody != 0 - if head { - respDump, err := httputil.DumpResponse(r.resp, false) - if err != nil { - dump.WriteString(err.Error()) - } else { - if i := bytes.Index(respDump, []byte("\r\n\r\n")); i >= 0 { - respDump = respDump[:i] - } - dump.Write(respDump) - } - } - if body && len(r.Bytes()) > 0 { - dump.Write(r.Bytes()) +func newDefaultDumpOptions() *DumpOptions { + return &DumpOptions{ + Output: os.Stdout, + RequestBody: true, + ResponseBody: true, + ResponseHeader: true, + RequestHeader: true, } } -func (r *Resp) Dump() string { - dump := new(dumpBuffer) - r.dumpRequest(dump) - l := dump.Len() - if l > 0 { - dump.WriteString("=================================") - l = dump.Len() +func newDumper(opt *DumpOptions) *dump.Dumper { + if opt == nil { + opt = newDefaultDumpOptions() } - - r.dumpResponse(dump) - - return dump.String() + if opt.Output == nil { + opt.Output = os.Stderr + } + return dump.NewDumper(dumpOptions{opt}) } diff --git a/dump_test.go b/dump_test.go deleted file mode 100644 index 98d1bbe4..00000000 --- a/dump_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package req - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDumpText(t *testing.T) { - SetFlags(LstdFlags | Lcost) - reqBody := "request body" - respBody := "response body" - reqHeader := "Request-Header" - respHeader := "Response-Header" - handler := func(w http.ResponseWriter, r *http.Request) { - w.Header().Set(respHeader, "req") - w.Write([]byte(respBody)) - } - ts := httptest.NewServer(http.HandlerFunc(handler)) - header := Header{ - reqHeader: "hello", - } - resp, err := Post(ts.URL, header, reqBody) - if err != nil { - t.Fatal(err) - } - dump := resp.Dump() - for _, keyword := range []string{reqBody, respBody, reqHeader, respHeader} { - if !strings.Contains(dump, keyword) { - t.Errorf("dump missing part, want: %s", keyword) - } - } -} - -func TestDumpUpload(t *testing.T) { - SetFlags(LreqBody) - file1 := ioutil.NopCloser(strings.NewReader("file1")) - uploads := []FileUpload{ - { - FileName: "1.txt", - FieldName: "media", - File: file1, - }, - } - ts := newDefaultTestServer() - r, err := Post(ts.URL, uploads, Param{"hello": "req"}) - if err != nil { - t.Fatal(err) - } - dump := r.Dump() - contains := []string{ - `Content-Disposition: form-data; name="hello"`, - `Content-Disposition: form-data; 
name="media"; filename="1.txt"`, - } - for _, contain := range contains { - if !strings.Contains(dump, contain) { - t.Errorf("multipart dump should contains: %s", contain) - } - } -} diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..5ec6097e --- /dev/null +++ b/examples/README.md @@ -0,0 +1,4 @@ +# examples + +* [find-popular-repo](find-popular-repo): Invoke github api to find someone's most popular repo. +* [upload](upload): Use `req` to upload files. Contains a server written with `gin` and a client written with `req` \ No newline at end of file diff --git a/examples/find-popular-repo/README.md b/examples/find-popular-repo/README.md new file mode 100644 index 00000000..cbd79750 --- /dev/null +++ b/examples/find-popular-repo/README.md @@ -0,0 +1,17 @@ +# find-popular-repo + +This is a runable example of req, using the Github API [List repositories for a user](https://docs.github.com/cn/rest/reference/repos#list-repositories-for-a-user) to find someone's the most popular github repo. + +## How to run + +```bash +go run . +``` + +## Modify it + +Change the global `username` vairable to your own github username: + +```go +var username = "imroc" +``` \ No newline at end of file diff --git a/examples/find-popular-repo/go.mod b/examples/find-popular-repo/go.mod new file mode 100644 index 00000000..890f1462 --- /dev/null +++ b/examples/find-popular-repo/go.mod @@ -0,0 +1,31 @@ +module find-popular-repo + +go 1.22.0 + +toolchain go1.22.3 + +replace github.com/imroc/req/v3 => ../../ + +require github.com/imroc/req/v3 v3.0.0 + +require ( + github.com/cheekybits/genny v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/marten-seemann/qpack v0.2.1 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff --git a/examples/find-popular-repo/go.sum b/examples/find-popular-repo/go.sum new file mode 100644 index 00000000..b70f2661 --- /dev/null +++ b/examples/find-popular-repo/go.sum @@ -0,0 +1,332 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod 
h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI= +github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE= +github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang 
v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod 
h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org 
v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/examples/find-popular-repo/main.go b/examples/find-popular-repo/main.go new file mode 100644 index 00000000..44708f3b --- /dev/null +++ b/examples/find-popular-repo/main.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "strconv" + + "github.com/imroc/req/v3" +) + +// Change the name if you want +var username = "imroc" + +func main() { + repo, star, err := findTheMostPopularRepo(username) + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("The most popular repo of %s is %s, which have %d stars\n", username, repo, star) +} + +func init() { + req.EnableDebugLog(). + EnableTraceAll(). + EnableDumpEachRequest(). + SetCommonErrorResult(&ErrorMessage{}). + OnAfterResponse(func(client *req.Client, resp *req.Response) error { + if resp.Err != nil { + return nil + } + if errMsg, ok := resp.ErrorResult().(*ErrorMessage); ok { + resp.Err = errMsg + return nil + } + if !resp.IsSuccessState() { + resp.Err = fmt.Errorf("bad status: %s\nraw content:\n%s", resp.Status, resp.Dump()) + } + return nil + }) +} + +type Repo struct { + Name string `json:"name"` + Star int `json:"stargazers_count"` +} +type ErrorMessage struct { + Message string `json:"message"` +} + +func (msg *ErrorMessage) Error() string { + return fmt.Sprintf("API Error: %s", msg.Message) +} + +func findTheMostPopularRepo(username string) (repo string, star int, err error) { + var popularRepo Repo + var resp *req.Response + + for page := 1; ; page++ { + repos := []*Repo{} + resp, err = req.SetHeader("Accept", "application/vnd.github.v3+json"). + SetQueryParams(map[string]string{ + "type": "owner", + "page": strconv.Itoa(page), + "per_page": "100", + "sort": "updated", + "direction": "desc", + }). + SetPathParam("username", username). + SetSuccessResult(&repos). + Get("https://api.github.com/users/{username}/repos") + + fmt.Println("TraceInfo:") + fmt.Println("----------") + fmt.Println(resp.TraceInfo()) + fmt.Println() + + if err != nil { + return + } + + if !resp.IsSuccessState() { // HTTP status `code >= 200 and <= 299` is considered as success by default + return + } + for _, repo := range repos { + if repo.Star >= popularRepo.Star { + popularRepo = *repo + } + } + if len(repo) == 100 { // Try Next page + continue + } + // All repos have been traversed, return the final result + repo = popularRepo.Name + star = popularRepo.Star + return + } +} diff --git a/examples/opentelemetry-jaeger-tracing/README.md b/examples/opentelemetry-jaeger-tracing/README.md new file mode 100644 index 00000000..3dfa4a56 --- /dev/null +++ b/examples/opentelemetry-jaeger-tracing/README.md @@ -0,0 +1,30 @@ +# opentelemetry-jaeger-tracing + +This is a runnable example of req, which uses the built-in tiny github sdk built on req to query and display the information of the specified user. + +Best of all, it integrates seamlessly with jaeger tracing and is very easy to extend. 
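The wiring between this example and Jaeger is not shown in this part of the diff, so the sketch below is only an assumption of how the pieces might fit together: it builds a Jaeger exporter and tracer provider with the OpenTelemetry SDK versions pinned in `go.mod`, then hands the tracer to the client through the `SetTracer` method defined later in `github/github.go`. The collector endpoint and tracer name are placeholders.

```go
package main

import (
	"context"
	"log"

	"opentelemetry-jaeger-tracing/github"

	"go.opentelemetry.io/otel/exporters/jaeger"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Export spans to the all-in-one Jaeger container started below;
	// adjust the collector endpoint if yours differs.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://127.0.0.1:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	tp := tracesdk.NewTracerProvider(tracesdk.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	// github.NewClient and Client.SetTracer come from this example's github package.
	client := github.NewClient()
	client.SetTracer(tp.Tracer("github-client"))

	user, err := client.GetUserProfile(context.Background(), "imroc")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s (%s)", user.Name, user.Blog)
}
```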
+ +## How to run + +First, use `docker` or `podman` to start a test jeager container (see jeager official doc: [ Getting Started](https://www.jaegertracing.io/docs/1.37/getting-started/#all-in-one)). + +Then, run example: + +```bash +go run . +``` +```txt +Please give a github username: +``` + +Input a github username, e.g. `imroc`: + +```bash +$ go run . +Please give a github username: imroc +The moust popular repo of roc (https://imroc.cc) is req, which have 2500 stars +``` + +Then enter the Jaeger UI with browser (`http://127.0.0.1:16686/`), checkout the tracing details. + +Run example again, try to input some username that doesn't exist, and check the error log in Jaeger UI. diff --git a/examples/opentelemetry-jaeger-tracing/github/github.go b/examples/opentelemetry-jaeger-tracing/github/github.go new file mode 100644 index 00000000..b7898301 --- /dev/null +++ b/examples/opentelemetry-jaeger-tracing/github/github.go @@ -0,0 +1,192 @@ +package github + +import ( + "context" + "fmt" + "github.com/imroc/req/v3" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "strconv" + "strings" +) + +// Client is the go client for GitHub API. +type Client struct { + *req.Client +} + +// APIError represents the error message that GitHub API returns. +// GitHub API doc: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#client-errors +type APIError struct { + Message string `json:"message"` + DocumentationUrl string `json:"documentation_url,omitempty"` + Errors []struct { + Resource string `json:"resource"` + Field string `json:"field"` + Code string `json:"code"` + } `json:"errors,omitempty"` +} + +// Error convert APIError to a human readable error and return. +func (e *APIError) Error() string { + msg := fmt.Sprintf("API error: %s", e.Message) + if e.DocumentationUrl != "" { + return fmt.Sprintf("%s (see doc %s)", msg, e.DocumentationUrl) + } + if len(e.Errors) == 0 { + return msg + } + errs := []string{} + for _, err := range e.Errors { + errs = append(errs, fmt.Sprintf("resource:%s field:%s code:%s", err.Resource, err.Field, err.Code)) + } + return fmt.Sprintf("%s (%s)", msg, strings.Join(errs, " | ")) +} + +// NewClient create a GitHub client. +func NewClient() *Client { + c := req.C(). + // All GitHub API requests need this header. + SetCommonHeader("Accept", "application/vnd.github.v3+json"). + // All GitHub API requests use the same base URL. + SetBaseURL("https://api.github.com"). + // Enable dump at the request-level for each request, and only + // temporarily stores the dump content in memory, so we can call + // resp.Dump() to get the dump content when needed in response + // middleware. + // This is actually a syntax sugar, implemented internally using + // request middleware + EnableDumpEachRequest(). + // Unmarshal response body into an APIError struct when status >= 400. + SetCommonErrorResult(&APIError{}). + // Handle common exceptions in response middleware. + OnAfterResponse(func(client *req.Client, resp *req.Response) error { + if resp.Err != nil { // There is an underlying error, e.g. network error or unmarshal error (SetSuccessResult or SetErrorResult was invoked before). + if dump := resp.Dump(); dump != "" { // Append dump content to original underlying error to help troubleshoot. + resp.Err = fmt.Errorf("error: %s\nraw content:\n%s", resp.Err.Error(), resp.Dump()) + } + return nil // Skip the following logic if there is an underlying error. 
+ } + if err, ok := resp.ErrorResult().(*APIError); ok { // Server returns an error message. + // Convert it to human-readable go error which implements the error interface. + resp.Err = err + return nil + } + // Corner case: neither an error response nor a success response, e.g. status code < 200 or + // code >= 300 && code <= 399, just dump the raw content into error to help troubleshoot. + if !resp.IsSuccessState() { + resp.Err = fmt.Errorf("bad response, raw content:\n%s", resp.Dump()) + } + return nil + }) + + return &Client{ + Client: c, + } +} + +type apiNameType int + +const apiNameKey apiNameType = iota + +// SetTracer set the tracer of opentelemetry. +func (c *Client) SetTracer(tracer trace.Tracer) { + c.WrapRoundTripFunc(func(rt req.RoundTripper) req.RoundTripFunc { + return func(req *req.Request) (resp *req.Response, err error) { + ctx := req.Context() + apiName, ok := ctx.Value(apiNameKey).(string) + if !ok { + apiName = req.URL.Path + } + _, span := tracer.Start(req.Context(), apiName) + defer span.End() + span.SetAttributes( + attribute.String("http.url", req.URL.String()), + attribute.String("http.method", req.Method), + attribute.String("http.req.header", req.HeaderToString()), + ) + if len(req.Body) > 0 { + span.SetAttributes( + attribute.String("http.req.body", string(req.Body)), + ) + } + resp, err = rt.RoundTrip(req) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + if resp.Response != nil { + span.SetAttributes( + attribute.Int("http.status_code", resp.StatusCode), + attribute.String("http.resp.header", resp.HeaderToString()), + attribute.String("http.resp.body", resp.String()), + ) + } + return + } + }) +} + +func withAPIName(ctx context.Context, name string) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, apiNameKey, name) +} + +type UserProfile struct { + Name string `json:"name"` + Blog string `json:"blog"` +} + +// GetUserProfile returns the user profile for the specified user. +// Github API doc: https://docs.github.com/en/rest/users/users#get-a-user +func (c *Client) GetUserProfile(ctx context.Context, username string) (user *UserProfile, err error) { + err = c.Get("/users/{username}"). + SetPathParam("username", username). + SetSuccessResult(&user). + Do(withAPIName(ctx, "GetUserProfile")).Err + return +} + +type Repo struct { + Name string `json:"name"` + Star int `json:"stargazers_count"` +} + +// ListUserRepo returns a list of public repositories for the specified user +// Github API doc: https://docs.github.com/en/rest/repos/repos#list-repositories-for-a-user +func (c *Client) ListUserRepo(ctx context.Context, username string, page int) (repos []*Repo, err error) { + err = c.Get("/users/{username}/repos"). + SetPathParam("username", username). + SetQueryParamsAnyType(map[string]any{ + "type": "owner", + "page": strconv.Itoa(page), + "per_page": "100", + "sort": "updated", + "direction": "desc", + }). + Do(withAPIName(ctx, "ListUserRepo")). + Into(&repos) + return +} + +// LoginWithToken login with GitHub personal access token. +// GitHub API doc: https://docs.github.com/en/rest/overview/other-authentication-methods#authenticating-for-saml-sso +func (c *Client) LoginWithToken(token string) *Client { + c.SetCommonHeader("Authorization", "token "+token) + return c +} + +// SetDebug enable debug if set to true, disable debug if set to false. 
+func (c *Client) SetDebug(enable bool) *Client { + if enable { + c.EnableDebugLog() + c.EnableDumpAll() + } else { + c.DisableDebugLog() + c.DisableDumpAll() + } + return c +} diff --git a/examples/opentelemetry-jaeger-tracing/go.mod b/examples/opentelemetry-jaeger-tracing/go.mod new file mode 100644 index 00000000..6454f319 --- /dev/null +++ b/examples/opentelemetry-jaeger-tracing/go.mod @@ -0,0 +1,40 @@ +module opentelemetry-jaeger-tracing + +go 1.22.0 + +toolchain go1.22.3 + +replace github.com/imroc/req/v3 => ../../ + +require ( + github.com/imroc/req/v3 v3.0.0 + go.opentelemetry.io/otel v1.9.0 + go.opentelemetry.io/otel/exporters/jaeger v1.9.0 + go.opentelemetry.io/otel/sdk v1.9.0 + go.opentelemetry.io/otel/trace v1.9.0 +) + +require ( + github.com/cheekybits/genny v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/lucas-clemente/quic-go v0.28.1 // indirect + github.com/marten-seemann/qpack v0.2.1 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff --git a/examples/opentelemetry-jaeger-tracing/go.sum b/examples/opentelemetry-jaeger-tracing/go.sum new file mode 100644 index 00000000..768df1c9 --- /dev/null +++ b/examples/opentelemetry-jaeger-tracing/go.sum @@ -0,0 +1,350 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser 
v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lunixbochs/vtclean v1.0.0/go.mod 
h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common 
v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= +go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= +go.opentelemetry.io/otel/exporters/jaeger v1.9.0 h1:gAEgEVGDWwFjcis9jJTOJqZNxDzoZfR12WNIxr7g9Ww= +go.opentelemetry.io/otel/exporters/jaeger v1.9.0/go.mod h1:hquezOLVAybNW6vanIxkdLXTXvzlj2Vn3wevSP15RYs= +go.opentelemetry.io/otel/sdk v1.9.0 h1:LNXp1vrr83fNXTHgU8eO89mhzxb/bbWAsHG6fNf3qWo= +go.opentelemetry.io/otel/sdk v1.9.0/go.mod h1:AEZc8nt5bd2F7BC24J5R0mrjYnpEgYHyTcM/vrSple4= +go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= +go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= 
+golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220731174439-a90be440212d h1:Sv5ogFZatcgIMMtBSTTAgMYsicp25MXBubjXNDKwm80= +golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/examples/opentelemetry-jaeger-tracing/main.go b/examples/opentelemetry-jaeger-tracing/main.go new file mode 100644 index 00000000..c4620b57 --- /dev/null +++ b/examples/opentelemetry-jaeger-tracing/main.go @@ -0,0 +1,162 @@ +package main + +import ( + "context" + "fmt" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + "log" + "opentelemetry-jaeger-tracing/github" + "os" + "os/signal" + "syscall" +) + +const serviceName = "github-query" + +var githubClient *github.Client + +func traceProvider() (*trace.TracerProvider, error) { + // Create the Jaeger exporter + ep := os.Getenv("JAEGER_ENDPOINT") + if ep == "" { + ep = "http://localhost:14268/api/traces" + } + exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(ep))) + if err != nil { + return nil, err + } + + // Record information about this application in a Resource. + res, _ := resource.Merge( + resource.Default(), + resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String(serviceName), + semconv.ServiceVersionKey.String("v0.1.0"), + attribute.String("environment", "test"), + ), + ) + + // Create the TraceProvider. + tp := trace.NewTracerProvider( + // Always be sure to batch in production. + trace.WithBatcher(exp), + // Record information about this application in a Resource. 
+ trace.WithResource(res), + trace.WithSampler(trace.AlwaysSample()), + ) + return tp, nil +} + +// QueryUser queries information for the specified GitHub user and displays a +// brief introduction which includes name, blog, and the most popular repo. +func QueryUser(username string) error { + ctx, span := otel.Tracer("query").Start(context.Background(), "QueryUser") + defer span.End() + + span.SetAttributes( + attribute.String("query.username", username), + ) + profile, err := githubClient.GetUserProfile(ctx, username) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + span.SetAttributes( + attribute.String("query.name", profile.Name), + attribute.String("result.blog", profile.Blog), + ) + repo, err := findMostPopularRepo(ctx, username) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + span.SetAttributes( + attribute.String("popular.repo.name", repo.Name), + attribute.Int("popular.repo.star", repo.Star), + ) + fmt.Printf("The most popular repo of %s (%s) is %s, with %d stars\n", profile.Name, profile.Blog, repo.Name, repo.Star) + return nil +} + +func findMostPopularRepo(ctx context.Context, username string) (repo *github.Repo, err error) { + ctx, span := otel.Tracer("query").Start(ctx, "findMostPopularRepo") + defer span.End() + + for page := 1; ; page++ { + var repos []*github.Repo + repos, err = githubClient.ListUserRepo(ctx, username, page) + if err != nil { + return + } + if len(repos) == 0 { + break + } + if repo == nil { + repo = repos[0] + } + for _, rp := range repos[1:] { + if rp.Star >= repo.Star { + repo = rp + } + } + if len(repos) == 100 { + continue + } + break + } + + if repo == nil { + err = fmt.Errorf("no repo found for %s", username) + } + return +} + +func main() { + tp, err := traceProvider() + if err != nil { + panic(err) + } + otel.SetTracerProvider(tp) + + githubClient = github.NewClient() + if os.Getenv("DEBUG") == "on" { + githubClient.SetDebug(true) + } + if token := os.Getenv("GITHUB_TOKEN"); token != "" { + githubClient.LoginWithToken(token) + } + githubClient.SetTracer(otel.Tracer("github")) + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) + go func() { + sig := <-sigs + fmt.Printf("Caught %s, shutting down\n", sig) + if err := tp.Shutdown(context.Background()); err != nil { + log.Fatal(err) + } + os.Exit(0) + }() + + for { + var name string + fmt.Printf("Please give a github username: ") + _, err := fmt.Fscanf(os.Stdin, "%s\n", &name) + if err != nil { + panic(err) + } + err = QueryUser(name) + if err != nil { + fmt.Println(err.Error()) + } + } +} diff --git a/examples/upload/README.md b/examples/upload/README.md new file mode 100644 index 00000000..6c9dbd7c --- /dev/null +++ b/examples/upload/README.md @@ -0,0 +1,19 @@ +# upload + +This is an upload example for `req`. + +## How to Run + +Run `uploadserver`: + +```bash +cd uploadserver +go run . +``` + +Run `uploadclient`: + +```bash +cd uploadclient +go run . 
+``` \ No newline at end of file diff --git a/examples/upload/uploadclient/go.mod b/examples/upload/uploadclient/go.mod new file mode 100644 index 00000000..e338f5cb --- /dev/null +++ b/examples/upload/uploadclient/go.mod @@ -0,0 +1,32 @@ +module uploadclient + +go 1.22.0 + +toolchain go1.22.3 + +replace github.com/imroc/req/v3 => ../../../ + +require github.com/imroc/req/v3 v3.0.0 + +require ( + github.com/cheekybits/genny v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/lucas-clemente/quic-go v0.28.1 // indirect + github.com/marten-seemann/qpack v0.2.1 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff --git a/examples/upload/uploadclient/go.sum b/examples/upload/uploadclient/go.sum new file mode 100644 index 00000000..b445b360 --- /dev/null +++ b/examples/upload/uploadclient/go.sum @@ -0,0 +1,339 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 
v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= 
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build 
v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03 h1:0FB83qp0AzVJm+0wcIlauAjJ+tNdh7jLuacRYCIVv7s= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod 
h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/examples/upload/uploadclient/main.go b/examples/upload/uploadclient/main.go new file mode 100644 index 00000000..d9c8fd34 --- /dev/null +++ b/examples/upload/uploadclient/main.go @@ -0,0 +1,31 @@ +package main + +import ( + "github.com/imroc/req/v3" +) + +func main() { + req.EnableDumpAllWithoutRequestBody() + req.SetFile("files", "../../../README.md"). + SetFile("files", "../../../LICENSE"). + SetFormData(map[string]string{ + "name": "imroc", + "email": "roc@imroc.cc", + }). + Post("http://127.0.0.1:8888/upload") + /* Output + POST /upload HTTP/1.1 + Host: 127.0.0.1:8888 + User-Agent: req/v2 (https://github.com/imroc/req) + Transfer-Encoding: chunked + Content-Type: multipart/form-data; boundary=6af1b071a682709355cf5fb15b9cf9e793df7a45e5cd1eb7c413f2e72bf6 + Accept-Encoding: gzip + + HTTP/1.1 200 OK + Content-Type: text/plain; charset=utf-8 + Date: Tue, 25 Jan 2022 09:40:36 GMT + Content-Length: 76 + + Uploaded successfully 2 files with fields name=imroc and email=roc@imroc.cc. + */ +} diff --git a/examples/upload/uploadserver/go.mod b/examples/upload/uploadserver/go.mod new file mode 100644 index 00000000..9daf3713 --- /dev/null +++ b/examples/upload/uploadserver/go.mod @@ -0,0 +1,27 @@ +module uploadserver + +go 1.18 + +require github.com/gin-gonic/gin v1.8.1 + +require ( + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-playground/validator/v10 v10.11.0 // indirect + github.com/goccy/go-json v0.9.10 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect + github.com/ugorji/go/codec v1.2.7 // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/net v0.0.0-20220809012201-f428fae20770 // indirect + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/examples/upload/uploadserver/go.sum b/examples/upload/uploadserver/go.sum new file mode 100644 index 00000000..a10a0570 --- /dev/null +++ b/examples/upload/uploadserver/go.sum @@ -0,0 +1,128 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.7 
h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/goccy/go-json v0.9.10 h1:hCeNmprSNLB8B8vQKWl6DpuH0t60oEs+TAk9a7CScKc= +github.com/goccy/go-json v0.9.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 
h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/examples/upload/uploadserver/main.go b/examples/upload/uploadserver/main.go new file mode 100644 index 00000000..d9c2c78a --- /dev/null +++ b/examples/upload/uploadserver/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "net/http" + "path/filepath" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.POST("/upload", func(c *gin.Context) { + name := c.PostForm("name") + email := c.PostForm("email") + + // Multipart form + form, err := c.MultipartForm() + if err != nil { + c.String(http.StatusBadRequest, "get form err: %s", err.Error()) + return + } + files := form.File["files"] + + for _, file := range files { + filename := filepath.Base(file.Filename) + if err := c.SaveUploadedFile(file, filename); err != nil { + c.String(http.StatusBadRequest, "upload file err: %s", err.Error()) + return + } + } + + c.String(http.StatusOK, "Uploaded successfully %d files with fields name=%s and email=%s.", len(files), name, email) + }) + router.Run(":8888") +} diff --git a/examples/uploadcallback/README.md b/examples/uploadcallback/README.md new file mode 100644 index 00000000..1d5574b1 --- /dev/null +++ b/examples/uploadcallback/README.md @@ -0,0 +1,19 @@ +# uploadcallback + +This is an upload callback example for `req` + +## How to Run + +Run `uploadserver`: + +```bash +cd uploadserver +go run . +``` + +Run `uploadclient`: + +```bash +cd uploadclient +go run . 
+``` \ No newline at end of file diff --git a/examples/uploadcallback/uploadclient/go.mod b/examples/uploadcallback/uploadclient/go.mod new file mode 100644 index 00000000..e338f5cb --- /dev/null +++ b/examples/uploadcallback/uploadclient/go.mod @@ -0,0 +1,32 @@ +module uploadclient + +go 1.22.0 + +toolchain go1.22.3 + +replace github.com/imroc/req/v3 => ../../../ + +require github.com/imroc/req/v3 v3.0.0 + +require ( + github.com/cheekybits/genny v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/lucas-clemente/quic-go v0.28.1 // indirect + github.com/marten-seemann/qpack v0.2.1 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff --git a/examples/uploadcallback/uploadclient/go.sum b/examples/uploadcallback/uploadclient/go.sum new file mode 100644 index 00000000..b445b360 --- /dev/null +++ b/examples/uploadcallback/uploadclient/go.sum @@ -0,0 +1,339 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.2 
h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org 
v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03 h1:0FB83qp0AzVJm+0wcIlauAjJ+tNdh7jLuacRYCIVv7s= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api 
v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/examples/uploadcallback/uploadclient/main.go b/examples/uploadcallback/uploadclient/main.go new file mode 100644 index 00000000..19dd217e --- /dev/null +++ b/examples/uploadcallback/uploadclient/main.go @@ -0,0 +1,47 @@ +package main + +import ( + "fmt" + "io" + "time" + "github.com/imroc/req/v3" +) + +type SlowReader struct { + Size int + n int +} + +func (r *SlowReader) Close() error { + return nil +} + +func (r *SlowReader) Read(p []byte) (int, error) { + if r.n >= r.Size { + return 0, io.EOF + } + time.Sleep(1 * time.Millisecond) + n := len(p) + if r.n+n >= r.Size { + n = r.Size - r.n + } + for i := 0; i < n; i++ { + p[i] = 'h' + } + r.n += n + return n, nil +} + +func main() { + size := 10 * 1024 * 1024 + req.SetFileUpload(req.FileUpload{ + ParamName: "file", + FileName: "test.txt", + GetFileContent: func() (io.ReadCloser, error) { + return &SlowReader{Size: size}, nil + }, + FileSize: int64(size), + }).SetUploadCallbackWithInterval(func(info req.UploadInfo) { + fmt.Printf("%s: %.2f%%\n", info.FileName, float64(info.UploadedSize)/float64(info.FileSize)*100.0) + }, 30*time.Millisecond).Post("http://127.0.0.1:8888/upload") +} diff --git a/examples/uploadcallback/uploadserver/go.mod b/examples/uploadcallback/uploadserver/go.mod new file mode 100644 index 00000000..9daf3713 --- /dev/null +++ b/examples/uploadcallback/uploadserver/go.mod @@ -0,0 +1,27 @@ +module uploadserver + +go 1.18 + +require github.com/gin-gonic/gin v1.8.1 + +require ( + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-playground/validator/v10 v10.11.0 // indirect + github.com/goccy/go-json v0.9.10 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect + github.com/ugorji/go/codec v1.2.7 // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/net v0.0.0-20220809012201-f428fae20770 // indirect + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/examples/uploadcallback/uploadserver/go.sum b/examples/uploadcallback/uploadserver/go.sum new file mode 100644 index 00000000..ac20466c --- /dev/null +++ b/examples/uploadcallback/uploadserver/go.sum @@ -0,0 +1,127 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/goccy/go-json v0.9.10 h1:hCeNmprSNLB8B8vQKWl6DpuH0t60oEs+TAk9a7CScKc= +github.com/goccy/go-json v0.9.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= 
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/examples/uploadcallback/uploadserver/main.go b/examples/uploadcallback/uploadserver/main.go new file mode 100644 index 00000000..62def71d --- /dev/null +++ b/examples/uploadcallback/uploadserver/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/gin-gonic/gin" + "io" + "net/http" +) + +func main() { + router := gin.Default() + router.POST("/upload", func(c *gin.Context) { + body := c.Request.Body + io.Copy(io.Discard, body) + c.String(http.StatusOK, "ok") + }) + router.Run(":8888") +} diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..c54597be --- /dev/null +++ b/go.mod @@ -0,0 +1,29 @@ +module github.com/imroc/req/v3 + +go 1.22.0 + +require ( + github.com/andybalholm/brotli v1.1.1 + github.com/hashicorp/go-multierror v1.1.1 + github.com/klauspost/compress v1.17.11 + github.com/quic-go/qpack v0.5.1 + github.com/quic-go/quic-go v0.48.2 + github.com/refraction-networking/utls v1.6.7 + golang.org/x/net v0.33.0 + golang.org/x/text v0.21.0 +) + +require ( + github.com/cloudflare/circl v1.5.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + go.uber.org/mock v0.5.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/tools v0.28.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..98c5fd3b --- /dev/null +++ b/go.sum @@ -0,0 +1,85 @@ +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/cloudflare/circl v1.4.0 h1:BV7h5MgrktNzytKmWjpOtdYrf0lkkbF8YMlBGPhJQrY= +github.com/cloudflare/circl v1.4.0/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= +github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20241203143554-1e3fdc7de467 h1:keEZFtbLJugfE0qHn+Ge1JCE71spzkchQobDf3mzS/4= +github.com/google/pprof v0.0.0-20241203143554-1e3fdc7de467/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/refraction-networking/utls v1.6.7 h1:zVJ7sP1dJx/WtVuITug3qYUq034cDq9B2MR1K67ULZM= +github.com/refraction-networking/utls v1.6.7/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +golang.org/x/crypto v0.29.0 
h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/header.go b/header.go new file mode 100644 index 00000000..c78b9a71 --- /dev/null +++ b/header.go @@ -0,0 +1,120 @@ +package req + +import ( + "io" + "net/http" + "net/textproto" + "sort" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + + "github.com/imroc/req/v3/internal/header" +) + +var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") + +// stringWriter implements WriteString on a Writer. 
+type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (n int, err error) { + return w.w.Write([]byte(s)) +} + +// A headerSorter implements sort.Interface by sorting a []keyValues +// by key. It's used as a pointer, so it can fit in a sort.Interface +// interface value without allocation. +type headerSorter struct { + kvs []header.KeyValues +} + +func (s *headerSorter) Len() int { return len(s.kvs) } +func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } +func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].Key < s.kvs[j].Key } + +var headerSorterPool = sync.Pool{ + New: func() interface{} { return new(headerSorter) }, +} + +// get is like Get, but key must already be in CanonicalHeaderKey form. +func headerGet(h http.Header, key string) string { + if v := h[key]; len(v) > 0 { + return v[0] + } + return "" +} + +// has reports whether h has the provided key defined, even if it's +// set to 0-length slice. +func headerHas(h http.Header, key string) bool { + _, ok := h[key] + return ok +} + +// sortedKeyValues returns h's keys sorted in the returned kvs +// slice. The headerSorter used to sort is also returned, for possible +// return to headerSorterCache. +func headerSortedKeyValues(h http.Header, exclude map[string]bool) (kvs []header.KeyValues, hs *headerSorter) { + hs = headerSorterPool.Get().(*headerSorter) + if cap(hs.kvs) < len(h) { + hs.kvs = make([]header.KeyValues, 0, len(h)) + } + kvs = hs.kvs[:0] + for k, vv := range h { + if !exclude[k] { + kvs = append(kvs, header.KeyValues{k, vv}) + } + } + hs.kvs = kvs + sort.Sort(hs) + return kvs, hs +} + +func headerWrite(h http.Header, writeHeader func(key string, values ...string) error, sort bool) error { + return headerWriteSubset(h, nil, writeHeader, sort) +} + +func headerWriteSubset(h http.Header, exclude map[string]bool, writeHeader func(key string, values ...string) error, sort bool) error { + var kvs []header.KeyValues + var hs *headerSorter + if sort { + kvs = make([]header.KeyValues, 0, len(h)) + for k, v := range h { + if !exclude[k] { + kvs = append(kvs, header.KeyValues{k, v}) + } + } + } else { + kvs, hs = headerSortedKeyValues(h, exclude) + } + for _, kv := range kvs { + if !httpguts.ValidHeaderFieldName(kv.Key) { + // This could be an error. In the common case of + // writing response headers, however, we have no good + // way to provide the error back to the server + // handler, so just drop invalid headers instead. + continue + } + for i, v := range kv.Values { + vv := headerNewlineToSpace.Replace(v) + vv = textproto.TrimString(vv) + if vv != v { + kv.Values[i] = vv + } + } + err := writeHeader(kv.Key, kv.Values...) + if err != nil { + if hs != nil { + headerSorterPool.Put(hs) + } + return err + } + } + if hs != nil { + headerSorterPool.Put(hs) + } + return nil +} diff --git a/http.go b/http.go new file mode 100644 index 00000000..61a63676 --- /dev/null +++ b/http.go @@ -0,0 +1,204 @@ +package req + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "net/textproto" + "strings" + + "github.com/imroc/req/v3/internal/ascii" + "golang.org/x/net/http/httpguts" + "golang.org/x/net/idna" +) + +// maxInt64 is the effective "infinite" value for the Server and +// Transport's byte-limiting readers. +const maxInt64 = 1<<63 - 1 + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). 
+type incomparable [0]func() + +// bodyIsWritable reports whether the Body supports writing. The +// Transport returns Writable bodies for 101 Switching Protocols +// responses. +// The Transport uses this method to determine whether a persistent +// connection is done being managed from its perspective. Once we +// return a writable response body to a user, the net/http package is +// done managing that connection. +func bodyIsWritable(r *http.Response) bool { + _, ok := r.Body.(io.Writer) + return ok +} + +// isProtocolSwitch reports whether the response code and header +// indicate a successful protocol upgrade response. +func isProtocolSwitch(r *http.Response) bool { + return isProtocolSwitchResponse(r.StatusCode, r.Header) +} + +// isProtocolSwitchResponse reports whether the response code and +// response header indicate a successful protocol upgrade response. +func isProtocolSwitchResponse(code int, h http.Header) bool { + return code == http.StatusSwitchingProtocols && isProtocolSwitchHeader(h) +} + +// isProtocolSwitchHeader reports whether the request or response header +// is for a protocol switch. +func isProtocolSwitchHeader(h http.Header) bool { + return h.Get("Upgrade") != "" && + httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") +} + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +var NoBody = noBody{} + +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +var ( + // verify that an io.Copy from NoBody won't require a buffer: + _ io.WriterTo = NoBody + _ io.ReadCloser = NoBody +) + +type readResult struct { + _ incomparable + n int + err error + b byte // byte read, if n == 1 +} + +// hasToken reports whether token appears with v, ASCII +// case-insensitive, with space or comma boundaries. +// token must be all lowercase. +// v may contain mixed cased. +func hasToken(v, token string) bool { + if len(token) > len(v) || token == "" { + return false + } + if v == token { + return true + } + for sp := 0; sp <= len(v)-len(token); sp++ { + // Check that first character is good. + // The token is ASCII, so checking only a single byte + // is sufficient. We skip this potential starting + // position if both the first byte and its potential + // ASCII uppercase equivalent (b|0x20) don't match. + // False positives ('^' => '~') are caught by EqualFold. + if b := v[sp]; b != token[0] && b|0x20 != token[0] { + continue + } + // Check that start pos is on a valid token boundary. + if sp > 0 && !isTokenBoundary(v[sp-1]) { + continue + } + // Check that end pos is on a valid token boundary. + if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) { + continue + } + if ascii.EqualFold(v[sp:sp+len(token)], token) { + return true + } + } + return false +} + +func isTokenBoundary(b byte) bool { + return b == ' ' || b == ',' || b == '\t' +} + +func badStringError(what, val string) error { return fmt.Errorf("%s %q", what, val) } + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. 
+func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// maxPostHandlerReadBytes is the max number of Request.Body bytes not +// consumed by a handler that the server will read from the client +// in order to keep a connection alive. If there are more bytes than +// this then the server to be paranoid instead sends a "Connection: +// close" response. +// +// This number is approximately what a typical machine's TCP buffer +// size is anyway. (if we have the bytes on the machine, we might as +// well read them) +const maxPostHandlerReadBytes = 256 << 10 + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. + // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. + if ascii.Is(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +// removeZone removes IPv6 zone identifier from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// stringContainsCTLByte reports whether s contains any ASCII control character. +func stringContainsCTLByte(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b < ' ' || b == 0x7f { + return true + } + } + return false +} + +// See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt +// "To receive authorization, the client sends the userid and password, +// separated by a single colon (":") character, within a base64 +// encoded string in the credentials." +// It is not meant to be urlencoded. +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} diff --git a/http2/priority.go b/http2/priority.go new file mode 100644 index 00000000..f63846dd --- /dev/null +++ b/http2/priority.go @@ -0,0 +1,28 @@ +package http2 + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +// PriorityFrame represents a http priority frame. 
+type PriorityFrame struct { + StreamID uint32 + PriorityParam PriorityParam +} diff --git a/http2/setting.go b/http2/setting.go new file mode 100644 index 00000000..f351238d --- /dev/null +++ b/http2/setting.go @@ -0,0 +1,48 @@ +package http2 + +import ( + "fmt" +) + +// A SettingID is an HTTP/2 setting as defined in +// https://httpwg.org/specs/rfc7540.html#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See https://httpwg.org/specs/rfc7540.html#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} diff --git a/http_request.go b/http_request.go new file mode 100644 index 00000000..5816de44 --- /dev/null +++ b/http_request.go @@ -0,0 +1,149 @@ +package req + +import ( + "errors" + "net/http" + "strings" + + "golang.org/x/net/http/httpguts" + + "github.com/imroc/req/v3/internal/ascii" + "github.com/imroc/req/v3/internal/header" +) + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// removeEmptyPort strips the empty port in ":port" to "" +// as mandated by RFC 3986 Section 6.2.3. +func removeEmptyPort(host string) string { + if hasPort(host) { + return strings.TrimSuffix(host, ":") + } + return host +} + +func isNotToken(r rune) bool { + return !httpguts.IsTokenRune(r) +} + +func validMethod(method string) bool { + /* + Method = "OPTIONS" ; Section 9.2 + | "GET" ; Section 9.3 + | "HEAD" ; Section 9.4 + | "POST" ; Section 9.5 + | "PUT" ; Section 9.6 + | "DELETE" ; Section 9.7 + | "TRACE" ; Section 9.8 + | "CONNECT" ; Section 9.9 + | extension-method + extension-method = token + token = 1*<any CHAR except CTLs or separators> + */ + return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1 +} + +func closeBody(r *http.Request) error { + if r.Body == nil { + return nil + } + return r.Body.Close() +} + +// requestBodyReadError wraps an error from (*Request).write to indicate +// that the error came from a Read call on the Request.Body. +// This error type should not escape the net/http package to users. +type requestBodyReadError struct{ error } + +// Return value if nonempty, def otherwise. +func valueOrDefault(value, def string) string { + if value != "" { + return value + } + return def +} + +// outgoingLength reports the Content-Length of this outgoing (Client) request. +// It maps 0 into -1 (unknown) when the Body is non-nil.
+func outgoingLength(r *http.Request) int64 { + if r.Body == nil || r.Body == NoBody { + return 0 + } + if r.ContentLength != 0 { + return r.ContentLength + } + return -1 +} + +// errMissingHost is returned by Write when there is no Host or URL present in +// the Request. +var errMissingHost = errors.New("http: Request.Write on Request with no Host or URL set") + +func closeRequestBody(r *http.Request) error { + if r.Body == nil { + return nil + } + return r.Body.Close() +} + +// Headers that Request.Write handles itself and should be skipped. +var reqWriteExcludeHeader = map[string]bool{ + "Host": true, // not in Header map anyway + "User-Agent": true, + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, + header.HeaderOderKey: true, + header.PseudoHeaderOderKey: true, +} + +// requestMethodUsuallyLacksBody reports whether the given request +// method is one that typically does not involve a request body. +// This is used by the Transport (via +// transferWriter.shouldSendChunkedRequestBody) to determine whether +// we try to test-read a byte from a non-nil Request.Body when +// Request.outgoingLength() returns -1. See the comments in +// shouldSendChunkedRequestBody. +func requestMethodUsuallyLacksBody(method string) bool { + switch method { + case "GET", "HEAD", "DELETE", "OPTIONS", "PROPFIND", "SEARCH": + return true + } + return false +} + +// requiresHTTP1 reports whether this request requires being sent on +// an HTTP/1 connection. +func requestRequiresHTTP1(r *http.Request) bool { + return hasToken(r.Header.Get("Connection"), "upgrade") && + ascii.EqualFold(r.Header.Get("Upgrade"), "websocket") +} + +func isReplayable(r *http.Request) bool { + if r.Body == nil || r.Body == NoBody || r.GetBody != nil { + switch valueOrDefault(r.Method, "GET") { + case "GET", "HEAD", "OPTIONS", "TRACE": + return true + } + // The Idempotency-Key, while non-standard, is widely used to + // mean a POST or other request is idempotent. See + // https://golang.org/issue/19943#issuecomment-421092421 + if headerHas(r.Header, "Idempotency-Key") || headerHas(r.Header, "X-Idempotency-Key") { + return true + } + } + return false +} + +func reqExpectsContinue(r *http.Request) bool { + return hasToken(headerGet(r.Header, "Expect"), "100-continue") +} + +func reqWantsClose(r *http.Request) bool { + if r.Close { + return true + } + return hasToken(headerGet(r.Header, "Connection"), "close") +} diff --git a/internal/altsvcutil/altsvcutil.go b/internal/altsvcutil/altsvcutil.go new file mode 100644 index 00000000..84978d7e --- /dev/null +++ b/internal/altsvcutil/altsvcutil.go @@ -0,0 +1,212 @@ +package altsvcutil + +import ( + "bytes" + "fmt" + "github.com/imroc/req/v3/internal/netutil" + "github.com/imroc/req/v3/pkg/altsvc" + "io" + "net" + "net/url" + "strconv" + "strings" + "time" +) + +type altAvcParser struct { + *bytes.Buffer +} + +// validOptionalPort reports whether port is either an empty string +// or matches /^:\d*$/ +func validOptionalPort(port string) bool { + if port == "" { + return true + } + if port[0] != ':' { + return false + } + for _, b := range port[1:] { + if b < '0' || b > '9' { + return false + } + } + return true +} + +// splitHostPort separates host and port. If the port is not valid, it returns +// the entire input as host, and it doesn't check the validity of the host. +// Unlike net.SplitHostPort, but per RFC 3986, it requires ports to be numeric. 
+func splitHostPort(hostPort string) (host, port string) { + host = hostPort + + colon := strings.LastIndexByte(host, ':') + if colon != -1 && validOptionalPort(host[colon:]) { + host, port = host[:colon], host[colon+1:] + } + + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + host = host[1 : len(host)-1] + } + return +} + +// ParseHeader parses the AltSvc from header value. +func ParseHeader(value string) ([]*altsvc.AltSvc, error) { + p := newAltSvcParser(value) + return p.Parse() +} + +func newAltSvcParser(value string) *altAvcParser { + buf := bytes.NewBufferString(value) + return &altAvcParser{buf} +} + +var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) + +func (p *altAvcParser) Parse() (as []*altsvc.AltSvc, err error) { + for { + a, e := p.parseOne() + if a != nil { + as = append(as, a) + } + if e != nil { + if e == io.EOF { + return + } else { + err = e + return + } + } + } +} + +func (p *altAvcParser) parseKv() (key, value string, haveNextField bool, err error) { + line, err := p.ReadBytes('=') + if len(line) == 0 { + return + } + key = strings.TrimSpace(string(line[:len(line)-1])) + bs := p.Bytes() + if len(bs) == 0 { + err = io.EOF + return + } + if bs[0] == '"' { + quoteIndex := 0 + for i := 1; i < len(bs); i++ { + if bs[i] == '"' { + quoteIndex = i + break + } + } + if quoteIndex == 0 { + err = fmt.Errorf("quote in alt-svc is not complete: %s", bs) + return + } + value = string(bs[1:quoteIndex]) + p.Next(quoteIndex + 1) + if len(bs) == quoteIndex+1 { + err = io.EOF + return + } + var b byte + b, err = p.ReadByte() + if err != nil { + return + } + if b == ';' { + haveNextField = true + } + } else { + delimIndex := 0 + LOOP: + for i, v := range bs { + switch v { + case ',': + delimIndex = i + break LOOP + case ';': + delimIndex = i + haveNextField = true + break LOOP + } + } + if delimIndex == 0 { + err = io.EOF + value = strings.TrimSpace(string(bs)) + return + } + p.Next(delimIndex + 1) + value = string(bs[:delimIndex]) + } + return +} + +func (p *altAvcParser) parseOne() (as *altsvc.AltSvc, err error) { + proto, addr, haveNextField, err := p.parseKv() + if proto == "" || addr == "" { + return + } + host, port := splitHostPort(addr) + + as = &altsvc.AltSvc{ + Protocol: proto, + Host: host, + Port: port, + Expire: endOfTime, + } + + if !haveNextField { + return + } + + key, ma, haveNextField, err := p.parseKv() + if key == "" || ma == "" { + return + } + if key != "ma" { + err = fmt.Errorf("expect ma field, got %s", key) + return + } + + maInt, err := strconv.ParseInt(ma, 10, 64) + if err != nil { + return + } + as.Expire = time.Now().Add(time.Duration(maInt) * time.Second) + + if !haveNextField { + return + } + + // drain useless fields + for { + _, _, haveNextField, err = p.parseKv() + if haveNextField { + continue + } else { + break + } + } + return +} + +// ConvertURL converts the raw request url to expected alt-svc's url. 
+func ConvertURL(a *altsvc.AltSvc, u *url.URL) *url.URL { + host, port := netutil.AuthorityHostPort(u.Scheme, u.Host) + uu := *u + modify := false + if a.Host != "" && a.Host != host { + host = a.Host + modify = true + } + if a.Port != "" && a.Port != port { + port = a.Port + modify = true + } + if modify { + uu.Host = net.JoinHostPort(host, port) + } + return &uu +} diff --git a/internal/altsvcutil/altsvcutil_test.go b/internal/altsvcutil/altsvcutil_test.go new file mode 100644 index 00000000..3cbc0019 --- /dev/null +++ b/internal/altsvcutil/altsvcutil_test.go @@ -0,0 +1,14 @@ +package altsvcutil + +import ( + "github.com/imroc/req/v3/internal/tests" + "testing" +) + +func TestParseHeader(t *testing.T) { + as, err := ParseHeader(` h3=":443"; ma=86400, h3-29=":443"; ma=86400`) + tests.AssertNoError(t, err) + tests.AssertEqual(t, 2, len(as)) + tests.AssertEqual(t, "h3", as[0].Protocol) + tests.AssertEqual(t, "443", as[0].Port) +} diff --git a/internal/ascii/print.go b/internal/ascii/print.go new file mode 100644 index 00000000..585e5bab --- /dev/null +++ b/internal/ascii/print.go @@ -0,0 +1,61 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ascii + +import ( + "strings" + "unicode" +) + +// EqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func EqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// IsPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func IsPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// Is returns whether s is ASCII. +func Is(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] > unicode.MaxASCII { + return false + } + } + return true +} + +// ToLower returns the lowercase version of s if s is ASCII and printable. +func ToLower(s string) (lower string, ok bool) { + if !IsPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/internal/ascii/print_test.go b/internal/ascii/print_test.go new file mode 100644 index 00000000..0b7767ca --- /dev/null +++ b/internal/ascii/print_test.go @@ -0,0 +1,95 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
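The ascii helpers above exist because full Unicode case folding is too permissive for HTTP tokens; for instance, U+212A KELVIN SIGN folds to 'k'. A small sketch of the difference (illustrative only; ascii is internal to the module), which the tests that follow cover in more detail:

	package main

	import (
		"fmt"
		"strings"

		"github.com/imroc/req/v3/internal/ascii"
	)

	func main() {
		kelvin := "chun\u212Aed" // "chunked" spelled with U+212A KELVIN SIGN
		fmt.Println(strings.EqualFold(kelvin, "chunked")) // true: Unicode folding maps U+212A to 'k'
		fmt.Println(ascii.EqualFold(kelvin, "chunked"))   // false: ASCII-only comparison
	}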
+ +package ascii + +import "testing" + +func TestEqualFold(t *testing.T) { + var tests = []struct { + name string + a, b string + want bool + }{ + { + name: "empty", + want: true, + }, + { + name: "simple match", + a: "CHUNKED", + b: "chunked", + want: true, + }, + { + name: "same string", + a: "chunked", + b: "chunked", + want: true, + }, + { + name: "Unicode Kelvin symbol", + a: "chun鈩猠d", // This "鈩" is 'KELVIN SIGN' (\u212A) + b: "chunked", + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := EqualFold(tt.a, tt.b); got != tt.want { + t.Errorf("AsciiEqualFold(%q,%q): got %v want %v", tt.a, tt.b, got, tt.want) + } + }) + } +} + +func TestIsPrint(t *testing.T) { + var tests = []struct { + name string + in string + want bool + }{ + { + name: "empty", + want: true, + }, + { + name: "ASCII low", + in: "This is a space: ' '", + want: true, + }, + { + name: "ASCII high", + in: "This is a tilde: '~'", + want: true, + }, + { + name: "ASCII low non-print", + in: "This is a unit separator: \x1F", + want: false, + }, + { + name: "Ascii high non-print", + in: "This is a Delete: \x7F", + want: false, + }, + { + name: "Unicode letter", + in: "Today it's 280鈩 outside: it's freezing!", // This "鈩" is 'KELVIN SIGN' (\u212A) + want: false, + }, + { + name: "Unicode emoji", + in: "Gophers like 馃", + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsPrint(tt.in); got != tt.want { + t.Errorf("IsASCIIPrint(%q): got %v want %v", tt.in, got, tt.want) + } + }) + } +} diff --git a/internal/bisect/bisect.go b/internal/bisect/bisect.go new file mode 100644 index 00000000..3e5a6849 --- /dev/null +++ b/internal/bisect/bisect.go @@ -0,0 +1,794 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bisect can be used by compilers and other programs +// to serve as a target for the bisect debugging tool. +// See [golang.org/x/tools/cmd/bisect] for details about using the tool. +// +// To be a bisect target, allowing bisect to help determine which of a set of independent +// changes provokes a failure, a program needs to: +// +// 1. Define a way to accept a change pattern on its command line or in its environment. +// The most common mechanism is a command-line flag. +// The pattern can be passed to [New] to create a [Matcher], the compiled form of a pattern. +// +// 2. Assign each change a unique ID. One possibility is to use a sequence number, +// but the most common mechanism is to hash some kind of identifying information +// like the file and line number where the change might be applied. +// [Hash] hashes its arguments to compute an ID. +// +// 3. Enable each change that the pattern says should be enabled. +// The [Matcher.ShouldEnable] method answers this question for a given change ID. +// +// 4. Print a report identifying each change that the pattern says should be printed. +// The [Matcher.ShouldPrint] method answers this question for a given change ID. +// The report consists of one more lines on standard error or standard output +// that contain a 鈥渕atch marker鈥. [Marker] returns the match marker for a given ID. +// When bisect reports a change as causing the failure, it identifies the change +// by printing the report lines with the match marker removed. +// +// # Example Usage +// +// A program starts by defining how it receives the pattern. In this example, we will assume a flag. 
+// The next step is to compile the pattern: +// +// m, err := bisect.New(patternFlag) +// if err != nil { +// log.Fatal(err) +// } +// +// Then, each time a potential change is considered, the program computes +// a change ID by hashing identifying information (source file and line, in this case) +// and then calls m.ShouldPrint and m.ShouldEnable to decide whether to +// print and enable the change, respectively. The two can return different values +// depending on whether bisect is trying to find a minimal set of changes to +// disable or to enable to provoke the failure. +// +// It is usually helpful to write a helper function that accepts the identifying information +// and then takes care of hashing, printing, and reporting whether the identified change +// should be enabled. For example, a helper for changes identified by a file and line number +// would be: +// +// func ShouldEnable(file string, line int) { +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// return m.ShouldEnable(h) +// } +// +// Finally, note that New returns a nil Matcher when there is no pattern, +// meaning that the target is not running under bisect at all, +// so all changes should be enabled and none should be printed. +// In that common case, the computation of the hash can be avoided entirely +// by checking for m == nil first: +// +// func ShouldEnable(file string, line int) bool { +// if m == nil { +// return true +// } +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// return m.ShouldEnable(h) +// } +// +// When the identifying information is expensive to format, this code can call +// [Matcher.MarkerOnly] to find out whether short report lines containing only the +// marker are permitted for a given run. (Bisect permits such lines when it is +// still exploring the space of possible changes and will not be showing the +// output to the user.) If so, the client can choose to print only the marker: +// +// func ShouldEnable(file string, line int) bool { +// if m == nil { +// return true +// } +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// if m.MarkerOnly() { +// bisect.PrintMarker(os.Stderr, h) +// } else { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// } +// return m.ShouldEnable(h) +// } +// +// This specific helper 鈥 deciding whether to enable a change identified by +// file and line number and printing about the change when necessary 鈥 is +// provided by the [Matcher.FileLine] method. +// +// Another common usage is deciding whether to make a change in a function +// based on the caller's stack, to identify the specific calling contexts that the +// change breaks. The [Matcher.Stack] method takes care of obtaining the stack, +// printing it when necessary, and reporting whether to enable the change +// based on that stack. +// +// # Pattern Syntax +// +// Patterns are generated by the bisect tool and interpreted by [New]. +// Users should not have to understand the patterns except when +// debugging a target's bisect support or debugging the bisect tool itself. +// +// The pattern syntax selecting a change is a sequence of bit strings +// separated by + and - operators. Each bit string denotes the set of +// changes with IDs ending in those bits, + is set addition, - is set subtraction, +// and the expression is evaluated in the usual left-to-right order. 
+// The special binary number 鈥測鈥 denotes the set of all changes, +// standing in for the empty bit string. +// In the expression, all the + operators must appear before all the - operators. +// A leading + adds to an empty set. A leading - subtracts from the set of all +// possible suffixes. +// +// For example: +// +// - 鈥01+10鈥 and 鈥+01+10鈥 both denote the set of changes +// with IDs ending with the bits 01 or 10. +// +// - 鈥01+10-1001鈥 denotes the set of changes with IDs +// ending with the bits 01 or 10, but excluding those ending in 1001. +// +// - 鈥-01-1000鈥 and 鈥測-01-1000 both denote the set of all changes +// with IDs not ending in 01 nor 1000. +// +// - 鈥0+1-01+001鈥 is not a valid pattern, because all the + operators do not +// appear before all the - operators. +// +// In the syntaxes described so far, the pattern specifies the changes to +// enable and report. If a pattern is prefixed by a 鈥!鈥, the meaning +// changes: the pattern specifies the changes to DISABLE and report. This +// mode of operation is needed when a program passes with all changes +// enabled but fails with no changes enabled. In this case, bisect +// searches for minimal sets of changes to disable. +// Put another way, the leading 鈥!鈥 inverts the result from [Matcher.ShouldEnable] +// but does not invert the result from [Matcher.ShouldPrint]. +// +// As a convenience for manual debugging, 鈥渘鈥 is an alias for 鈥!y鈥, +// meaning to disable and report all changes. +// +// Finally, a leading 鈥渧鈥 in the pattern indicates that the reports will be shown +// to the user of bisect to describe the changes involved in a failure. +// At the API level, the leading 鈥渧鈥 causes [Matcher.Visible] to return true. +// See the next section for details. +// +// # Match Reports +// +// The target program must enable only those changed matched +// by the pattern, and it must print a match report for each such change. +// A match report consists of one or more lines of text that will be +// printed by the bisect tool to describe a change implicated in causing +// a failure. Each line in the report for a given change must contain a +// match marker with that change ID, as returned by [Marker]. +// The markers are elided when displaying the lines to the user. +// +// A match marker has the form 鈥淸bisect-match 0x1234]鈥 where +// 0x1234 is the change ID in hexadecimal. +// An alternate form is 鈥淸bisect-match 010101]鈥, giving the change ID in binary. +// +// When [Matcher.Visible] returns false, the match reports are only +// being processed by bisect to learn the set of enabled changes, +// not shown to the user, meaning that each report can be a match +// marker on a line by itself, eliding the usual textual description. +// When the textual description is expensive to compute, +// checking [Matcher.Visible] can help the avoid that expense +// in most runs. +package bisect + +import ( + "runtime" + "sync" + "sync/atomic" + "unsafe" +) + +// New creates and returns a new Matcher implementing the given pattern. +// The pattern syntax is defined in the package doc comment. +// +// In addition to the pattern syntax syntax, New("") returns nil, nil. +// The nil *Matcher is valid for use: it returns true from ShouldEnable +// and false from ShouldPrint for all changes. Callers can avoid calling +// [Hash], [Matcher.ShouldEnable], and [Matcher.ShouldPrint] entirely +// when they recognize the nil Matcher. 
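A hedged end-to-end sketch of the API described above, using New (defined next), Hash, Marker, ShouldPrint, and ShouldEnable; the pattern and the file/line pair are arbitrary stand-ins:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/imroc/req/v3/internal/bisect"
	)

	func main() {
		// Enable and report only changes whose IDs end in the bits 01 or 10.
		m, err := bisect.New("01+10")
		if err != nil {
			log.Fatal(err)
		}
		id := bisect.Hash("encoder.go", 127) // stand-in for a real change site
		if m.ShouldPrint(id) {
			fmt.Fprintln(os.Stderr, bisect.Marker(id), "encoder.go:127")
		}
		fmt.Println("enable change:", m.ShouldEnable(id))
	}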
+func New(pattern string) (*Matcher, error) { + if pattern == "" { + return nil, nil + } + + m := new(Matcher) + + p := pattern + // Special case for leading 'q' so that 'qn' quietly disables, e.g. fmahash=qn to disable fma + // Any instance of 'v' disables 'q'. + if len(p) > 0 && p[0] == 'q' { + m.quiet = true + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + // Allow multiple v, so that 鈥渂isect cmd vPATTERN鈥 can force verbose all the time. + for len(p) > 0 && p[0] == 'v' { + m.verbose = true + m.quiet = false + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + + // Allow multiple !, each negating the last, so that 鈥渂isect cmd !PATTERN鈥 works + // even when bisect chooses to add its own !. + m.enable = true + for len(p) > 0 && p[0] == '!' { + m.enable = !m.enable + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + + if p == "n" { + // n is an alias for !y. + m.enable = !m.enable + p = "y" + } + + // Parse actual pattern syntax. + result := true + bits := uint64(0) + start := 0 + wid := 1 // 1-bit (binary); sometimes 4-bit (hex) + for i := 0; i <= len(p); i++ { + // Imagine a trailing - at the end of the pattern to flush final suffix + c := byte('-') + if i < len(p) { + c = p[i] + } + if i == start && wid == 1 && c == 'x' { // leading x for hex + start = i + 1 + wid = 4 + continue + } + switch c { + default: + return nil, &parseError{"invalid pattern syntax: " + pattern} + case '2', '3', '4', '5', '6', '7', '8', '9': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + fallthrough + case '0', '1': + bits <<= wid + bits |= uint64(c - '0') + case 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', 'E', 'F': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + bits <<= 4 + bits |= uint64(c&^0x20 - 'A' + 10) + case 'y': + if i+1 < len(p) && (p[i+1] == '0' || p[i+1] == '1') { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + bits = 0 + case '+', '-': + if c == '+' && result == false { + // Have already seen a -. Should be - from here on. + return nil, &parseError{"invalid pattern syntax (+ after -): " + pattern} + } + if i > 0 { + n := (i - start) * wid + if n > 64 { + return nil, &parseError{"pattern bits too long: " + pattern} + } + if n <= 0 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + if p[start] == 'y' { + n = 0 + } + mask := uint64(1)<= 0; i-- { + c := &m.list[i] + if id&c.mask == c.bits { + return c.result + } + } + return false +} + +// FileLine reports whether the change identified by file and line should be enabled. +// If the change should be printed, FileLine prints a one-line report to w. +func (m *Matcher) FileLine(w Writer, file string, line int) bool { + if m == nil { + return true + } + return m.fileLine(w, file, line) +} + +// fileLine does the real work for FileLine. +// This lets FileLine's body handle m == nil and potentially be inlined. +func (m *Matcher) fileLine(w Writer, file string, line int) bool { + h := Hash(file, line) + if m.ShouldPrint(h) { + if m.MarkerOnly() { + PrintMarker(w, h) + } else { + printFileLine(w, h, file, line) + } + } + return m.ShouldEnable(h) +} + +// printFileLine prints a non-marker-only report for file:line to w. 
+func printFileLine(w Writer, h uint64, file string, line int) error { + const markerLen = 40 // overestimate + b := make([]byte, 0, markerLen+len(file)+24) + b = AppendMarker(b, h) + b = appendFileLine(b, file, line) + b = append(b, '\n') + _, err := w.Write(b) + return err +} + +// appendFileLine appends file:line to dst, returning the extended slice. +func appendFileLine(dst []byte, file string, line int) []byte { + dst = append(dst, file...) + dst = append(dst, ':') + u := uint(line) + if line < 0 { + dst = append(dst, '-') + u = -u + } + var buf [24]byte + i := len(buf) + for i == len(buf) || u > 0 { + i-- + buf[i] = '0' + byte(u%10) + u /= 10 + } + dst = append(dst, buf[i:]...) + return dst +} + +// MatchStack assigns the current call stack a change ID. +// If the stack should be printed, MatchStack prints it. +// Then MatchStack reports whether a change at the current call stack should be enabled. +func (m *Matcher) Stack(w Writer) bool { + if m == nil { + return true + } + return m.stack(w) +} + +// stack does the real work for Stack. +// This lets stack's body handle m == nil and potentially be inlined. +func (m *Matcher) stack(w Writer) bool { + const maxStack = 16 + var stk [maxStack]uintptr + n := runtime.Callers(2, stk[:]) + // caller #2 is not for printing; need it to normalize PCs if ASLR. + if n <= 1 { + return false + } + + base := stk[0] + // normalize PCs + for i := range stk[:n] { + stk[i] -= base + } + + h := Hash(stk[:n]) + if m.ShouldPrint(h) { + var d *dedup + for { + d = m.dedup.Load() + if d != nil { + break + } + d = new(dedup) + if m.dedup.CompareAndSwap(nil, d) { + break + } + } + + if m.MarkerOnly() { + if !d.seenLossy(h) { + PrintMarker(w, h) + } + } else { + if !d.seen(h) { + // Restore PCs in stack for printing + for i := range stk[:n] { + stk[i] += base + } + printStack(w, h, stk[1:n]) + } + } + } + return m.ShouldEnable(h) +} + +// Writer is the same interface as io.Writer. +// It is duplicated here to avoid importing io. +type Writer interface { + Write([]byte) (int, error) +} + +// PrintMarker prints to w a one-line report containing only the marker for h. +// It is appropriate to use when [Matcher.ShouldPrint] and [Matcher.MarkerOnly] both return true. +func PrintMarker(w Writer, h uint64) error { + var buf [50]byte + b := AppendMarker(buf[:0], h) + b = append(b, '\n') + _, err := w.Write(b) + return err +} + +// printStack prints to w a multi-line report containing a formatting of the call stack stk, +// with each line preceded by the marker for h. +func printStack(w Writer, h uint64, stk []uintptr) error { + buf := make([]byte, 0, 2048) + + var prefixBuf [100]byte + prefix := AppendMarker(prefixBuf[:0], h) + + frames := runtime.CallersFrames(stk) + for { + f, more := frames.Next() + buf = append(buf, prefix...) + buf = append(buf, f.Func.Name()...) + buf = append(buf, "()\n"...) + buf = append(buf, prefix...) + buf = append(buf, '\t') + buf = appendFileLine(buf, f.File, f.Line) + buf = append(buf, '\n') + if !more { + break + } + } + buf = append(buf, prefix...) + buf = append(buf, '\n') + _, err := w.Write(buf) + return err +} + +// Marker returns the match marker text to use on any line reporting details +// about a match of the given ID. +// It always returns the hexadecimal format. +func Marker(id uint64) string { + return string(AppendMarker(nil, id)) +} + +// AppendMarker is like [Marker] but appends the marker to dst. 
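To make the marker format concrete, a quick round trip through Marker above and CutMarker further below (the report text is invented):

	package main

	import (
		"fmt"

		"github.com/imroc/req/v3/internal/bisect"
	)

	func main() {
		line := bisect.Marker(0x1234) + " compress.go:42" // an invented report line
		// line is "[bisect-match 0x0000000000001234] compress.go:42"
		short, id, ok := bisect.CutMarker(line)
		fmt.Println(short, id, ok) // compress.go:42 4660 true
	}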
+func AppendMarker(dst []byte, id uint64) []byte { + const prefix = "[bisect-match 0x" + var buf [len(prefix) + 16 + 1]byte + copy(buf[:], prefix) + for i := 0; i < 16; i++ { + buf[len(prefix)+i] = "0123456789abcdef"[id>>60] + id <<= 4 + } + buf[len(prefix)+16] = ']' + return append(dst, buf[:]...) +} + +// CutMarker finds the first match marker in line and removes it, +// returning the shortened line (with the marker removed), +// the ID from the match marker, +// and whether a marker was found at all. +// If there is no marker, CutMarker returns line, 0, false. +func CutMarker(line string) (short string, id uint64, ok bool) { + // Find first instance of prefix. + prefix := "[bisect-match " + i := 0 + for ; ; i++ { + if i >= len(line)-len(prefix) { + return line, 0, false + } + if line[i] == '[' && line[i:i+len(prefix)] == prefix { + break + } + } + + // Scan to ]. + j := i + len(prefix) + for j < len(line) && line[j] != ']' { + j++ + } + if j >= len(line) { + return line, 0, false + } + + // Parse id. + idstr := line[i+len(prefix) : j] + if len(idstr) >= 3 && idstr[:2] == "0x" { + // parse hex + if len(idstr) > 2+16 { // max 0x + 16 digits + return line, 0, false + } + for i := 2; i < len(idstr); i++ { + id <<= 4 + switch c := idstr[i]; { + case '0' <= c && c <= '9': + id |= uint64(c - '0') + case 'a' <= c && c <= 'f': + id |= uint64(c - 'a' + 10) + case 'A' <= c && c <= 'F': + id |= uint64(c - 'A' + 10) + } + } + } else { + if idstr == "" || len(idstr) > 64 { // min 1 digit, max 64 digits + return line, 0, false + } + // parse binary + for i := 0; i < len(idstr); i++ { + id <<= 1 + switch c := idstr[i]; c { + default: + return line, 0, false + case '0', '1': + id |= uint64(c - '0') + } + } + } + + // Construct shortened line. + // Remove at most one space from around the marker, + // so that "foo [marker] bar" shortens to "foo bar". + j++ // skip ] + if i > 0 && line[i-1] == ' ' { + i-- + } else if j < len(line) && line[j] == ' ' { + j++ + } + short = line[:i] + line[j:] + return short, id, true +} + +// Hash computes a hash of the data arguments, +// each of which must be of type string, byte, int, uint, int32, uint32, int64, uint64, uintptr, or a slice of one of those types. +func Hash(data ...any) uint64 { + h := offset64 + for _, v := range data { + switch v := v.(type) { + default: + // Note: Not printing the type, because reflect.ValueOf(v) + // would make the interfaces prepared by the caller escape + // and therefore allocate. This way, Hash(file, line) runs + // without any allocation. It should be clear from the + // source code calling Hash what the bad argument was. 
+ panic("bisect.Hash: unexpected argument type") + case string: + h = fnvString(h, v) + case byte: + h = fnv(h, v) + case int: + h = fnvUint64(h, uint64(v)) + case uint: + h = fnvUint64(h, uint64(v)) + case int32: + h = fnvUint32(h, uint32(v)) + case uint32: + h = fnvUint32(h, v) + case int64: + h = fnvUint64(h, uint64(v)) + case uint64: + h = fnvUint64(h, v) + case uintptr: + h = fnvUint64(h, uint64(v)) + case []string: + for _, x := range v { + h = fnvString(h, x) + } + case []byte: + for _, x := range v { + h = fnv(h, x) + } + case []int: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []uint: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []int32: + for _, x := range v { + h = fnvUint32(h, uint32(x)) + } + case []uint32: + for _, x := range v { + h = fnvUint32(h, x) + } + case []int64: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []uint64: + for _, x := range v { + h = fnvUint64(h, x) + } + case []uintptr: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + } + } + return h +} + +// Trivial error implementation, here to avoid importing errors. + +// parseError is a trivial error implementation, +// defined here to avoid importing errors. +type parseError struct{ text string } + +func (e *parseError) Error() string { return e.text } + +// FNV-1a implementation. See Go's hash/fnv/fnv.go. +// Copied here for simplicity (can handle integers more directly) +// and to avoid importing hash/fnv. + +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +func fnv(h uint64, x byte) uint64 { + h ^= uint64(x) + h *= prime64 + return h +} + +func fnvString(h uint64, x string) uint64 { + for i := 0; i < len(x); i++ { + h ^= uint64(x[i]) + h *= prime64 + } + return h +} + +func fnvUint64(h uint64, x uint64) uint64 { + for i := 0; i < 8; i++ { + h ^= x & 0xFF + x >>= 8 + h *= prime64 + } + return h +} + +func fnvUint32(h uint64, x uint32) uint64 { + for i := 0; i < 4; i++ { + h ^= uint64(x & 0xFF) + x >>= 8 + h *= prime64 + } + return h +} + +// A dedup is a deduplicator for call stacks, so that we only print +// a report for new call stacks, not for call stacks we've already +// reported. +// +// It has two modes: an approximate but lock-free mode that +// may still emit some duplicates, and a precise mode that uses +// a lock and never emits duplicates. +type dedup struct { + // 128-entry 4-way, lossy cache for seenLossy + recent [128][4]uint64 + + // complete history for seen + mu sync.Mutex + m map[uint64]bool +} + +// seen records that h has now been seen and reports whether it was seen before. +// When seen returns false, the caller is expected to print a report for h. +func (d *dedup) seen(h uint64) bool { + d.mu.Lock() + if d.m == nil { + d.m = make(map[uint64]bool) + } + seen := d.m[h] + d.m[h] = true + d.mu.Unlock() + return seen +} + +// seenLossy is a variant of seen that avoids a lock by using a cache of recently seen hashes. +// Each cache entry is N-way set-associative: h can appear in any of the slots. +// If h does not appear in any of them, then it is inserted into a random slot, +// overwriting whatever was there before. +func (d *dedup) seenLossy(h uint64) bool { + cache := &d.recent[uint(h)%uint(len(d.recent))] + for i := 0; i < len(cache); i++ { + if atomic.LoadUint64(&cache[i]) == h { + return true + } + } + + // Compute index in set to evict as hash of current set. 
+ ch := offset64 + for _, x := range cache { + ch = fnvUint64(ch, x) + } + atomic.StoreUint64(&cache[uint(ch)%uint(len(cache))], h) + return false +} diff --git a/internal/charsets/.testdata/HTTP-vs-UTF-8-BOM.html b/internal/charsets/.testdata/HTTP-vs-UTF-8-BOM.html new file mode 100644 index 00000000..26e5d8b4 --- /dev/null +++ b/internal/charsets/.testdata/HTTP-vs-UTF-8-BOM.html @@ -0,0 +1,48 @@ +锘 + + + HTTP vs UTF-8 BOM + + + + + + + + + + + +

HTTP vs UTF-8 BOM

+ + +
+ + +
 
+ + + + + +
+

A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

If the test is unsuccessful, the characters ï»¿ should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.

+
+
+
Next test
HTML5
+

the-input-byte-stream-034
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/internal/charsets/.testdata/UTF-16BE-BOM.html b/internal/charsets/.testdata/UTF-16BE-BOM.html new file mode 100644 index 00000000..3abf7a93 Binary files /dev/null and b/internal/charsets/.testdata/UTF-16BE-BOM.html differ diff --git a/internal/charsets/.testdata/UTF-16LE-BOM.html b/internal/charsets/.testdata/UTF-16LE-BOM.html new file mode 100644 index 00000000..76254c98 Binary files /dev/null and b/internal/charsets/.testdata/UTF-16LE-BOM.html differ diff --git a/internal/charsets/.testdata/UTF-8-BOM-vs-meta-charset.html b/internal/charsets/.testdata/UTF-8-BOM-vs-meta-charset.html new file mode 100644 index 00000000..83de4333 --- /dev/null +++ b/internal/charsets/.testdata/UTF-8-BOM-vs-meta-charset.html @@ -0,0 +1,49 @@ +锘 + + + UTF-8 BOM vs meta charset + + + + + + + + + + + +

UTF-8 BOM vs meta charset

+ + +
+ + +
 
+ + + + + +
+

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.

+

The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
Next test
HTML5
+

the-input-byte-stream-038
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/internal/charsets/.testdata/UTF-8-BOM-vs-meta-content.html b/internal/charsets/.testdata/UTF-8-BOM-vs-meta-content.html new file mode 100644 index 00000000..501aac2d --- /dev/null +++ b/internal/charsets/.testdata/UTF-8-BOM-vs-meta-content.html @@ -0,0 +1,48 @@ +锘 + + + UTF-8 BOM vs meta content + + + + + + + + + + + +

UTF-8 BOM vs meta content

+ + +
+ + +
 
+ + + + + +
+

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.

+

The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
Next test
HTML5
+

the-input-byte-stream-037
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/internal/charsets/.testdata/meta-charset-attribute.html b/internal/charsets/.testdata/meta-charset-attribute.html new file mode 100644 index 00000000..2d7d25ab --- /dev/null +++ b/internal/charsets/.testdata/meta-charset-attribute.html @@ -0,0 +1,48 @@ + + + + meta charset attribute + + + + + + + + + + + +

meta charset attribute

+ + +
+ + +
 
+ + + + + +
+

The character encoding of the page can be set by a meta element with charset attribute.

+

The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
Next test
HTML5
+

the-input-byte-stream-009
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/internal/charsets/.testdata/meta-content-attribute.html b/internal/charsets/.testdata/meta-content-attribute.html new file mode 100644 index 00000000..1c3f228e --- /dev/null +++ b/internal/charsets/.testdata/meta-content-attribute.html @@ -0,0 +1,48 @@ + + + + meta content attribute + + + + + + + + + + + +

meta content attribute

+ + +
+ + +
 
+ + + + + +
+

The character encoding of the page can be set by a meta element with http-equiv and content attributes.

+

The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
Next test
HTML5
+

the-input-byte-stream-007
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/internal/charsets/charsets.go b/internal/charsets/charsets.go new file mode 100644 index 00000000..cb910660 --- /dev/null +++ b/internal/charsets/charsets.go @@ -0,0 +1,152 @@ +package charsets + +import ( + "bytes" + "golang.org/x/net/html" + htmlcharset "golang.org/x/net/html/charset" + "golang.org/x/text/encoding" + "strings" +) + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} + +// FindEncoding sniff and find the encoding of the content. +func FindEncoding(content []byte) (enc encoding.Encoding, name string) { + if len(content) == 0 { + return + } + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + enc, name = htmlcharset.Lookup(b.enc) + if enc != nil { + if strings.ToLower(name) == "utf-8" { + enc = nil + } + return + } + } + } + enc, name = prescan(content) + if strings.ToLower(name) == "utf-8" { + enc = nil + } + return +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = htmlcharset.Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = htmlcharset.Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} diff --git a/internal/charsets/charsets_test.go b/internal/charsets/charsets_test.go new file mode 100644 index 00000000..28cd698a --- /dev/null +++ b/internal/charsets/charsets_test.go @@ -0,0 +1,66 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
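Before the sniffing tests below: a sketch of how FindEncoding above might be combined with golang.org/x/text to decode a page that is not UTF-8. The file name is a placeholder, and pairing the returned encoding.Encoding with x/text/transform is an assumption of this sketch rather than something the package itself does:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"os"

		"github.com/imroc/req/v3/internal/charsets"
		"golang.org/x/text/transform"
	)

	func main() {
		raw, err := os.ReadFile("page.html") // e.g. a GBK or ISO 8859-15 page
		if err != nil {
			panic(err)
		}
		enc, name := charsets.FindEncoding(raw)
		if enc == nil {
			// UTF-8 (or nothing detected): the bytes can be used as-is.
			fmt.Println(string(raw))
			return
		}
		decoded, err := io.ReadAll(transform.NewReader(bytes.NewReader(raw), enc.NewDecoder()))
		if err != nil {
			panic(err)
		}
		fmt.Println(name, string(decoded))
	}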
+ +package charsets + +import ( + "github.com/imroc/req/v3/internal/tests" + "os" + "runtime" + "testing" +) + +var sniffTestCases = []struct { + filename, want string +}{ + {"UTF-16LE-BOM.html", "utf-16le"}, + {"UTF-16BE-BOM.html", "utf-16be"}, + {"meta-content-attribute.html", "iso-8859-15"}, + {"meta-charset-attribute.html", "iso-8859-15"}, + {"HTTP-vs-UTF-8-BOM.html", "utf-8"}, + {"UTF-8-BOM-vs-meta-content.html", "utf-8"}, + {"UTF-8-BOM-vs-meta-charset.html", "utf-8"}, +} + +func TestSniff(t *testing.T) { + switch runtime.GOOS { + case "nacl": // platforms that don't permit direct file system access + t.Skipf("not supported on %q", runtime.GOOS) + } + + for _, tc := range sniffTestCases { + content, err := os.ReadFile(tests.GetTestFilePath(tc.filename)) + if err != nil { + t.Errorf("%s: error reading file: %v", tc.filename, err) + continue + } + + _, name := FindEncoding(content) + if name != tc.want { + t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want) + continue + } + } +} + +var metaTestCases = []struct { + meta, want string +}{ + {"", ""}, + {"text/html", ""}, + {"text/html; charset utf-8", ""}, + {"text/html; charset=latin-2", "latin-2"}, + {"text/html; charset; charset = utf-8", "utf-8"}, + {`charset="big5"`, "big5"}, + {"charset='shift_jis'", "shift_jis"}, +} + +func TestFromMeta(t *testing.T) { + for _, tc := range metaTestCases { + got := fromMetaElement(tc.meta) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want) + } + } +} diff --git a/internal/chunked.go b/internal/chunked.go new file mode 100644 index 00000000..dec4ddca --- /dev/null +++ b/internal/chunked.go @@ -0,0 +1,263 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The wire protocol for HTTP's "chunked" Transfer-Encoding. + +// Package internal contains HTTP internals shared by net/http and +// net/http/httputil. +package internal + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "github.com/imroc/req/v3/internal/util" + "io" +) + +const maxLineLength = 4096 // assumed <= bufio.defaultBufSize + +// ErrLineTooLong is the error that header line too long. +var ErrLineTooLong = errors.New("header line too long") + +// NewChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// NewChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. 
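A tiny decode example for the reader described above; the chunked wire string is hand-written, and since this is an internal package the import only works inside the req module. Note that the terminating 0-length chunk surfaces as a clean EOF to io.ReadAll:

	package main

	import (
		"fmt"
		"io"
		"strings"

		"github.com/imroc/req/v3/internal"
	)

	func main() {
		const wire = "7\r\nhello, \r\n6\r\nworld!\r\n0\r\n"
		body, err := io.ReadAll(internal.NewChunkedReader(strings.NewReader(wire)))
		fmt.Printf("%q %v\n", body, err) // "hello, world!" <nil>
	}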
+func NewChunkedReader(r io.Reader) io.Reader { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + return &chunkedReader{r: br} +} + +type chunkedReader struct { + r *bufio.Reader + n uint64 // unread bytes in chunk + err error + buf [2]byte + checkEnd bool // whether need to check for \r\n chunk footer +} + +func (cr *chunkedReader) beginChunk() { + // chunk-size CRLF + var line []byte + line, cr.err = readChunkLine(cr.r) + if cr.err != nil { + return + } + cr.n, cr.err = parseHexUint(line) + if cr.err != nil { + return + } + if cr.n == 0 { + cr.err = io.EOF + } +} + +func (cr *chunkedReader) chunkHeaderAvailable() bool { + n := cr.r.Buffered() + if n > 0 { + peek, _ := cr.r.Peek(n) + return bytes.IndexByte(peek, '\n') >= 0 + } + return false +} + +func (cr *chunkedReader) Read(b []uint8) (n int, err error) { + for cr.err == nil { + if cr.checkEnd { + if n > 0 && cr.r.Buffered() < 2 { + // We have some data. Return early (per the io.Reader + // contract) instead of potentially blocking while + // reading more. + break + } + if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { + if string(cr.buf[:]) != "\r\n" { + cr.err = errors.New("malformed chunked encoding") + break + } + } else { + if cr.err == io.EOF { + cr.err = io.ErrUnexpectedEOF + } + break + } + cr.checkEnd = false + } + if cr.n == 0 { + if n > 0 && !cr.chunkHeaderAvailable() { + // We've read enough. Don't potentially block + // reading a new chunk header. + break + } + cr.beginChunk() + continue + } + if len(b) == 0 { + break + } + rbuf := b + if uint64(len(rbuf)) > cr.n { + rbuf = rbuf[:cr.n] + } + var n0 int + n0, cr.err = cr.r.Read(rbuf) + n += n0 + b = b[n0:] + cr.n -= uint64(n0) + // If we're at the end of a chunk, read the next two + // bytes to verify they are "\r\n". + if cr.n == 0 && cr.err == nil { + cr.checkEnd = true + } else if cr.err == io.EOF { + cr.err = io.ErrUnexpectedEOF + } + } + return n, cr.err +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are owned by the bufio.Reader +// so they are only valid until the next bufio read. +func readChunkLine(b *bufio.Reader) ([]byte, error) { + p, err := b.ReadSlice('\n') + if err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = ErrLineTooLong + } + return nil, err + } + if len(p) >= maxLineLength { + return nil, ErrLineTooLong + } + p = trimTrailingWhitespace(p) + p, err = removeChunkExtension(p) + if err != nil { + return nil, err + } + return p, nil +} + +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +var semi = []byte(";") + +// removeChunkExtension removes any chunk-extension from p. +// For example, +// "0" => "0" +// "0;token" => "0" +// "0;token=val" => "0" +// `0;token="quoted string"` => "0" +func removeChunkExtension(p []byte) ([]byte, error) { + p, _, _ = util.CutBytes(p, semi) + // TODO: care about exact syntax of chunk extensions? We're + // ignoring and stripping them anyway. For now just never + // return an error. + return p, nil +} + +// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. 
Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream but does +// not send the final CRLF that appears after trailers; trailers and the last +// CRLF must be written separately. +// +// NewChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using newChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. +func NewChunkedWriter(w io.Writer) io.WriteCloser { + return &chunkedWriter{w} +} + +// Writing to chunkedWriter translates to writing in HTTP chunked Transfer +// Encoding wire format to the underlying Wire chunkedWriter. +type chunkedWriter struct { + Wire io.Writer +} + +// Write the contents of data as one chunk to Wire. +// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has +// a bug since it does not check for success of io.WriteString +func (cw *chunkedWriter) Write(data []byte) (n int, err error) { + + // Don't send 0-length data. It looks like EOF for chunked encoding. + if len(data) == 0 { + return 0, nil + } + + if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { + return 0, err + } + if n, err = cw.Wire.Write(data); err != nil { + return + } + if n != len(data) { + err = io.ErrShortWrite + return + } + if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil { + return + } + if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok { + err = bw.Flush() + } + return +} + +func (cw *chunkedWriter) Close() error { + _, err := io.WriteString(cw.Wire, "0\r\n") + return err +} + +// FlushAfterChunkWriter signals from the caller of NewChunkedWriter +// that each chunk should be followed by a flush. It is used by the +// http.Transport code to keep the buffering behavior for headers and +// trailers, but flush out chunks aggressively in the middle for +// request bodies which may be generated slowly. See Issue 6574. +type FlushAfterChunkWriter struct { + *bufio.Writer +} + +func parseHexUint(v []byte) (n uint64, err error) { + for i, b := range v { + switch { + case '0' <= b && b <= '9': + b = b - '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + if i == 16 { + return 0, errors.New("http chunk length too large") + } + n <<= 4 + n |= uint64(b) + } + return +} diff --git a/internal/chunked_test.go b/internal/chunked_test.go new file mode 100644 index 00000000..5e29a786 --- /dev/null +++ b/internal/chunked_test.go @@ -0,0 +1,241 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + "testing" + "testing/iotest" +) + +func TestChunk(t *testing.T) { + var b bytes.Buffer + + w := NewChunkedWriter(&b) + const chunk1 = "hello, " + const chunk2 = "world! 0123456789abcdef" + w.Write([]byte(chunk1)) + w.Write([]byte(chunk2)) + w.Close() + + if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 
0123456789abcdef\r\n0\r\n"; g != e { + t.Fatalf("chunk writer wrote %q; want %q", g, e) + } + + r := NewChunkedReader(&b) + data, err := io.ReadAll(r) + if err != nil { + t.Logf(`data: "%s"`, data) + t.Fatalf("ReadAll from reader: %v", err) + } + if g, e := string(data), chunk1+chunk2; g != e { + t.Errorf("chunk reader read %q; want %q", g, e) + } +} + +func TestChunkReadMultiple(t *testing.T) { + // Bunch of small chunks, all read together. + { + var b bytes.Buffer + w := NewChunkedWriter(&b) + w.Write([]byte("foo")) + w.Write([]byte("bar")) + w.Close() + + r := NewChunkedReader(&b) + buf := make([]byte, 10) + n, err := r.Read(buf) + if n != 6 || err != io.EOF { + t.Errorf("Read = %d, %v; want 6, EOF", n, err) + } + buf = buf[:n] + if string(buf) != "foobar" { + t.Errorf("Read = %q; want %q", buf, "foobar") + } + } + + // One big chunk followed by a little chunk, but the small bufio.Reader size + // should prevent the second chunk header from being read. + { + var b bytes.Buffer + w := NewChunkedWriter(&b) + // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, + // the same as the bufio ReaderSize below (the minimum), so even + // though we're going to try to Read with a buffer larger enough to also + // receive "foo", the second chunk header won't be read yet. + const fillBufChunk = "0123456789a" + const shortChunk = "foo" + w.Write([]byte(fillBufChunk)) + w.Write([]byte(shortChunk)) + w.Close() + + r := NewChunkedReader(bufio.NewReaderSize(&b, 16)) + buf := make([]byte, len(fillBufChunk)+len(shortChunk)) + n, err := r.Read(buf) + if n != len(fillBufChunk) || err != nil { + t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) + } + buf = buf[:n] + if string(buf) != fillBufChunk { + t.Errorf("Read = %q; want %q", buf, fillBufChunk) + } + + n, err = r.Read(buf) + if n != len(shortChunk) || err != io.EOF { + t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) + } + } + + // And test that we see an EOF chunk, even though our buffer is already full: + { + r := NewChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + buf := make([]byte, 3) + n, err := r.Read(buf) + if n != 3 || err != io.EOF { + t.Errorf("Read = %d, %v; want 3, EOF", n, err) + } + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + } +} + +func TestChunkReaderAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var buf bytes.Buffer + w := NewChunkedWriter(&buf) + a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") + w.Write(a) + w.Write(b) + w.Write(c) + w.Close() + + readBuf := make([]byte, len(a)+len(b)+len(c)+1) + byter := bytes.NewReader(buf.Bytes()) + bufr := bufio.NewReader(byter) + mallocs := testing.AllocsPerRun(100, func() { + byter.Seek(0, io.SeekStart) + bufr.Reset(byter) + r := NewChunkedReader(bufr) + n, err := io.ReadFull(r, readBuf) + if n != len(readBuf)-1 { + t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) + } + if err != io.ErrUnexpectedEOF { + t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) + } + }) + if mallocs > 1.5 { + t.Errorf("mallocs = %v; want 1", mallocs) + } +} + +func TestParseHexUint(t *testing.T) { + type testCase struct { + in string + want uint64 + wantErr string + } + tests := []testCase{ + {"x", 0, "invalid byte in chunk length"}, + {"0000000000000000", 0, ""}, + {"0000000000000001", 1, ""}, + {"ffffffffffffffff", 1<<64 - 1, ""}, + {"000000000000bogus", 0, "invalid byte in chunk length"}, + {"00000000000000000", 0, "http chunk 
length too large"}, // could accept if we wanted + {"10000000000000000", 0, "http chunk length too large"}, + {"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted + } + for i := uint64(0); i <= 1234; i++ { + tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i}) + } + for _, tt := range tests { + got, err := parseHexUint([]byte(tt.in)) + if tt.wantErr != "" { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr) + } + } else { + if err != nil || got != tt.want { + t.Errorf("parseHexUint(%q) = %v, %v; want %v", tt.in, got, err, tt.want) + } + } + } +} + +func TestChunkReadingIgnoresExtensions(t *testing.T) { + in := "7;ext=\"some quoted string\"\r\n" + // token=quoted string + "hello, \r\n" + + "17;someext\r\n" + // token without value + "world! 0123456789abcdef\r\n" + + "0;someextension=sometoken\r\n" // token=token + data, err := io.ReadAll(NewChunkedReader(strings.NewReader(in))) + if err != nil { + t.Fatalf("ReadAll = %q, %v", data, err) + } + if g, e := string(data), "hello, world! 0123456789abcdef"; g != e { + t.Errorf("read %q; want %q", g, e) + } +} + +// Issue 17355: ChunkedReader shouldn't block waiting for more data +// if it can return something. +func TestChunkReadPartial(t *testing.T) { + pr, pw := io.Pipe() + go func() { + pw.Write([]byte("7\r\n1234567")) + }() + cr := NewChunkedReader(pr) + readBuf := make([]byte, 7) + n, err := cr.Read(readBuf) + if err != nil { + t.Fatal(err) + } + want := "1234567" + if n != 7 || string(readBuf) != want { + t.Fatalf("Read: %v %q; want %d, %q", n, readBuf[:n], len(want), want) + } + go func() { + pw.Write([]byte("xx")) + }() + _, err = cr.Read(readBuf) + if got := fmt.Sprint(err); !strings.Contains(got, "malformed") { + t.Fatalf("second read = %v; want malformed error", err) + } + +} + +// Issue 48861: ChunkedReader should report incomplete chunks +func TestIncompleteChunk(t *testing.T) { + const valid = "4\r\nabcd\r\n" + "5\r\nabc\r\n\r\n" + "0\r\n" + + for i := 0; i < len(valid); i++ { + incomplete := valid[:i] + r := NewChunkedReader(strings.NewReader(incomplete)) + if _, err := io.ReadAll(r); err != io.ErrUnexpectedEOF { + t.Errorf("expected io.ErrUnexpectedEOF for %q, got %v", incomplete, err) + } + } + + r := NewChunkedReader(strings.NewReader(valid)) + if _, err := io.ReadAll(r); err != nil { + t.Errorf("unexpected error for %q: %v", valid, err) + } +} + +func TestChunkEndReadError(t *testing.T) { + readErr := fmt.Errorf("chunk end read error") + + r := NewChunkedReader(io.MultiReader(strings.NewReader("4\r\nabcd"), iotest.ErrReader(readErr))) + if _, err := io.ReadAll(r); err != readErr { + t.Errorf("expected %v, got %v", readErr, err) + } +} diff --git a/internal/common/error.go b/internal/common/error.go new file mode 100644 index 00000000..c6bf3edd --- /dev/null +++ b/internal/common/error.go @@ -0,0 +1,7 @@ +package common + +import "errors" + +// ErrRequestCanceled is a copy of net/http's common.ErrRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
+var ErrRequestCanceled = errors.New("net/http: request canceled") diff --git a/internal/compress/brotli_reader.go b/internal/compress/brotli_reader.go new file mode 100644 index 00000000..6eaeffb5 --- /dev/null +++ b/internal/compress/brotli_reader.go @@ -0,0 +1,39 @@ +package compress + +import ( + "io" + + "github.com/andybalholm/brotli" +) + +type BrotliReader struct { + Body io.ReadCloser // underlying Response.Body + br io.Reader // lazily-initialized brotli reader + berr error // sticky error +} + +func NewBrotliReader(body io.ReadCloser) *BrotliReader { + return &BrotliReader{Body: body} +} + +func (br *BrotliReader) Read(p []byte) (n int, err error) { + if br.berr != nil { + return 0, br.berr + } + if br.br == nil { + br.br = brotli.NewReader(br.Body) + } + return br.br.Read(p) +} + +func (br *BrotliReader) Close() error { + return br.Body.Close() +} + +func (br *BrotliReader) GetUnderlyingBody() io.ReadCloser { + return br.Body +} + +func (br *BrotliReader) SetUnderlyingBody(body io.ReadCloser) { + br.Body = body +} diff --git a/internal/compress/deflate_reader.go b/internal/compress/deflate_reader.go new file mode 100644 index 00000000..44cf4943 --- /dev/null +++ b/internal/compress/deflate_reader.go @@ -0,0 +1,41 @@ +package compress + +import ( + "compress/flate" + "io" +) + +type DeflateReader struct { + Body io.ReadCloser // underlying Response.Body + dr io.ReadCloser // lazily-initialized deflate reader + derr error // sticky error +} + +func NewDeflateReader(body io.ReadCloser) *DeflateReader { + return &DeflateReader{Body: body} +} + +func (df *DeflateReader) Read(p []byte) (n int, err error) { + if df.derr != nil { + return 0, df.derr + } + if df.dr == nil { + df.dr = flate.NewReader(df.Body) + } + return df.dr.Read(p) +} + +func (df *DeflateReader) Close() error { + if df.dr != nil { + return df.dr.Close() + } + return df.Body.Close() +} + +func (df *DeflateReader) GetUnderlyingBody() io.ReadCloser { + return df.Body +} + +func (df *DeflateReader) SetUnderlyingBody(body io.ReadCloser) { + df.Body = body +} diff --git a/internal/compress/gzip_reader.go b/internal/compress/gzip_reader.go new file mode 100644 index 00000000..615d7339 --- /dev/null +++ b/internal/compress/gzip_reader.go @@ -0,0 +1,49 @@ +package compress + +import ( + "compress/gzip" + "io" + "io/fs" +) + +// GzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type GzipReader struct { + Body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func NewGzipReader(body io.ReadCloser) *GzipReader { + return &GzipReader{Body: body} +} + +func (gz *GzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.Body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *GzipReader) Close() error { + if err := gz.Body.Close(); err != nil { + return err + } + gz.zerr = fs.ErrClosed + return nil +} + +func (gz *GzipReader) GetUnderlyingBody() io.ReadCloser { + return gz.Body +} + +func (gz *GzipReader) SetUnderlyingBody(body io.ReadCloser) { + gz.Body = body +} diff --git a/internal/compress/reader.go b/internal/compress/reader.go new file mode 100644 index 00000000..0a65a3df --- /dev/null +++ b/internal/compress/reader.go @@ -0,0 +1,23 @@ +package compress + +import "io" + +type CompressReader interface { + io.ReadCloser + GetUnderlyingBody() io.ReadCloser + 
SetUnderlyingBody(body io.ReadCloser) +} + +func NewCompressReader(body io.ReadCloser, contentEncoding string) CompressReader { + switch contentEncoding { + case "gzip": + return NewGzipReader(body) + case "deflate": + return NewDeflateReader(body) + case "br": + return NewBrotliReader(body) + case "zstd": + return NewZstdReader(body) + } + return nil +} diff --git a/internal/compress/zstd_reader.go b/internal/compress/zstd_reader.go new file mode 100644 index 00000000..5e0b68d9 --- /dev/null +++ b/internal/compress/zstd_reader.go @@ -0,0 +1,46 @@ +package compress + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type ZstdReader struct { + Body io.ReadCloser // underlying Response.Body + zr *zstd.Decoder // lazily-initialized zstd reader + zerr error // sticky error +} + +func NewZstdReader(body io.ReadCloser) *ZstdReader { + return &ZstdReader{Body: body} +} + +func (zr *ZstdReader) Read(p []byte) (n int, err error) { + if zr.zerr != nil { + return 0, zr.zerr + } + if zr.zr == nil { + zr.zr, err = zstd.NewReader(zr.Body) + if err != nil { + zr.zerr = err + return 0, err + } + } + return zr.zr.Read(p) +} + +func (zr *ZstdReader) Close() error { + if zr.zr != nil { + zr.zr.Close() + } + return zr.Body.Close() +} + +func (zr *ZstdReader) GetUnderlyingBody() io.ReadCloser { + return zr.Body +} + +func (zr *ZstdReader) SetUnderlyingBody(body io.ReadCloser) { + zr.Body = body +} diff --git a/internal/dump/dump.go b/internal/dump/dump.go new file mode 100644 index 00000000..0f71da7e --- /dev/null +++ b/internal/dump/dump.go @@ -0,0 +1,224 @@ +package dump + +import ( + "context" + "io" + "net/http" +) + +// Options controls the dump behavior. +type Options interface { + Output() io.Writer + RequestHeaderOutput() io.Writer + RequestBodyOutput() io.Writer + ResponseHeaderOutput() io.Writer + ResponseBodyOutput() io.Writer + RequestHeader() bool + RequestBody() bool + ResponseHeader() bool + ResponseBody() bool + Async() bool + Clone() Options +} + +func (d *Dumper) WrapResponseBodyReadCloser(rc io.ReadCloser) io.ReadCloser { + return &dumpReponseBodyReadCloser{rc, d} +} + +type dumpReponseBodyReadCloser struct { + io.ReadCloser + dump *Dumper +} + +func (r *dumpReponseBodyReadCloser) Read(p []byte) (n int, err error) { + n, err = r.ReadCloser.Read(p) + r.dump.DumpResponseBody(p[:n]) + if err == io.EOF { + r.dump.DumpDefault([]byte("\r\n")) + } + return +} + +func (d *Dumper) WrapRequestBodyWriteCloser(rc io.WriteCloser) io.WriteCloser { + return &dumpRequestBodyWriteCloser{rc, d} +} + +type dumpRequestBodyWriteCloser struct { + io.WriteCloser + dump *Dumper +} + +func (w *dumpRequestBodyWriteCloser) Write(p []byte) (n int, err error) { + n, err = w.WriteCloser.Write(p) + w.dump.DumpRequestBody(p[:n]) + return +} + +type dumpRequestHeaderWriter struct { + w io.Writer + dump *Dumper +} + +func (w *dumpRequestHeaderWriter) Write(p []byte) (n int, err error) { + n, err = w.w.Write(p) + w.dump.DumpRequestHeader(p[:n]) + return +} + +func (d *Dumper) WrapRequestHeaderWriter(w io.Writer) io.Writer { + return &dumpRequestHeaderWriter{ + w: w, + dump: d, + } +} + +type dumpRequestBodyWriter struct { + w io.Writer + dump *Dumper +} + +func (w *dumpRequestBodyWriter) Write(p []byte) (n int, err error) { + n, err = w.w.Write(p) + w.dump.DumpRequestBody(p[:n]) + return +} + +func (d *Dumper) WrapRequestBodyWriter(w io.Writer) io.Writer { + return &dumpRequestBodyWriter{ + w: w, + dump: d, + } +} + +// GetResponseHeaderDumpers return Dumpers which need dump response header. 
+func GetResponseHeaderDumpers(ctx context.Context, dump *Dumper) Dumpers { + dumpers := GetDumpers(ctx, dump) + var ds []*Dumper + for _, d := range dumpers { + if d.ResponseHeader() { + ds = append(ds, d) + } + } + return Dumpers(ds) +} + +// Dumpers is an array of Dumpper +type Dumpers []*Dumper + +// ShouldDump is true if Dumper is not empty. +func (ds Dumpers) ShouldDump() bool { + return len(ds) > 0 +} + +func (ds Dumpers) DumpResponseHeader(p []byte) { + for _, d := range ds { + d.DumpResponseHeader(p) + } +} + +// Dumper is the dump tool. +type Dumper struct { + Options + ch chan *dumpTask +} + +type dumpTask struct { + Data []byte + Output io.Writer +} + +// NewDumper create a new Dumper. +func NewDumper(opt Options) *Dumper { + d := &Dumper{ + Options: opt, + ch: make(chan *dumpTask, 20), + } + return d +} + +func (d *Dumper) SetOptions(opt Options) { + d.Options = opt +} + +func (d *Dumper) Clone() *Dumper { + if d == nil { + return nil + } + return &Dumper{ + Options: d.Options.Clone(), + ch: make(chan *dumpTask, 20), + } +} + +func (d *Dumper) DumpTo(p []byte, output io.Writer) { + if len(p) == 0 || output == nil { + return + } + if d.Async() { + b := make([]byte, len(p)) + copy(b, p) + d.ch <- &dumpTask{Data: b, Output: output} + return + } + output.Write(p) +} + +func (d *Dumper) DumpDefault(p []byte) { + d.DumpTo(p, d.Output()) +} + +func (d *Dumper) DumpRequestHeader(p []byte) { + d.DumpTo(p, d.RequestHeaderOutput()) +} + +func (d *Dumper) DumpRequestBody(p []byte) { + d.DumpTo(p, d.RequestBodyOutput()) +} + +func (d *Dumper) DumpResponseHeader(p []byte) { + d.DumpTo(p, d.ResponseHeaderOutput()) +} + +func (d *Dumper) DumpResponseBody(p []byte) { + d.DumpTo(p, d.ResponseBodyOutput()) +} + +func (d *Dumper) Stop() { + d.ch <- nil +} + +func (d *Dumper) Start() { + for t := range d.ch { + if t == nil { + return + } + t.Output.Write(t.Data) + } +} + +type dumperKeyType int + +const DumperKey dumperKeyType = iota + +func GetDumpers(ctx context.Context, dump *Dumper) []*Dumper { + dumps := []*Dumper{} + if dump != nil { + dumps = append(dumps, dump) + } + if ctx == nil { + return dumps + } + if d, ok := ctx.Value(DumperKey).(*Dumper); ok { + dumps = append(dumps, d) + } + return dumps +} + +func WrapResponseBodyIfNeeded(res *http.Response, req *http.Request, dump *Dumper) { + dumps := GetDumpers(req.Context(), dump) + for _, d := range dumps { + if d.ResponseBody() { + res.Body = d.WrapResponseBodyReadCloser(res.Body) + } + } +} diff --git a/internal/godebug/godebug.go b/internal/godebug/godebug.go new file mode 100644 index 00000000..f5a0f53a --- /dev/null +++ b/internal/godebug/godebug.go @@ -0,0 +1,245 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package godebug makes the settings in the $GODEBUG environment variable +// available to other packages. These settings are often used for compatibility +// tweaks, when we need to change a default behavior but want to let users +// opt back in to the original. For example GODEBUG=http2server=0 disables +// HTTP/2 support in the net/http server. +// +// In typical usage, code should declare a Setting as a global +// and then call Value each time the current setting value is needed: +// +// var http2server = godebug.New("http2server") +// +// func ServeConn(c net.Conn) { +// if http2server.Value() == "0" { +// disallow HTTP/2 +// ... +// } +// ... 
+// } +// +// Each time a non-default setting causes a change in program behavior, +// code should call [Setting.IncNonDefault] to increment a counter that can +// be reported by [runtime/metrics.Read]. +// Note that counters used with IncNonDefault must be added to +// various tables in other packages. See the [Setting.IncNonDefault] +// documentation for details. +package godebug + +// Note: Be careful about new imports here. Any package +// that internal/godebug imports cannot itself import internal/godebug, +// meaning it cannot introduce a GODEBUG setting of its own. +// We keep imports to the absolute bare minimum. +import ( + "sync" + "sync/atomic" + _ "unsafe" // go:linkname + + "github.com/imroc/req/v3/internal/bisect" + "github.com/imroc/req/v3/internal/godebugs" +) + +// A Setting is a single setting in the $GODEBUG environment variable. +type Setting struct { + name string + once sync.Once + *setting +} + +type setting struct { + value atomic.Pointer[value] + nonDefaultOnce sync.Once + nonDefault atomic.Uint64 + info *godebugs.Info +} + +type value struct { + text string + bisect *bisect.Matcher +} + +// New returns a new Setting for the $GODEBUG setting with the given name. +// +// GODEBUGs meant for use by end users must be listed in ../godebugs/table.go, +// which is used for generating and checking various documentation. +// If the name is not listed in that table, New will succeed but calling Value +// on the returned Setting will panic. +// To disable that panic for access to an undocumented setting, +// prefix the name with a #, as in godebug.New("#gofsystrace"). +// The # is a signal to New but not part of the key used in $GODEBUG. +func New(name string) *Setting { + return &Setting{name: name} +} + +// Name returns the name of the setting. +func (s *Setting) Name() string { + if s.name != "" && s.name[0] == '#' { + return s.name[1:] + } + return s.name +} + +// Undocumented reports whether this is an undocumented setting. +func (s *Setting) Undocumented() bool { + return s.name != "" && s.name[0] == '#' +} + +// String returns a printable form for the setting: name=value. +func (s *Setting) String() string { + return s.Name() + "=" + s.Value() +} + +// IncNonDefault increments the non-default behavior counter +// associated with the given setting. +// This counter is exposed in the runtime/metrics value +// /godebug/non-default-behavior/:events. +// +// Note that Value must be called at least once before IncNonDefault. +func (s *Setting) IncNonDefault() { + s.nonDefaultOnce.Do(s.register) + s.nonDefault.Add(1) +} + +func (s *Setting) register() { + if s.info == nil || s.info.Opaque { + panic("godebug: unexpected IncNonDefault of " + s.name) + } +} + +// cache is a cache of all the GODEBUG settings, +// a locked map[string]*atomic.Pointer[string]. +// +// All Settings with the same name share a single +// *atomic.Pointer[string], so that when GODEBUG +// changes only that single atomic string pointer +// needs to be updated. +// +// A name appears in the values map either if it is the +// name of a Setting for which Value has been called +// at least once, or if the name has ever appeared in +// a name=value pair in the $GODEBUG environment variable. +// Once entered into the map, the name is never removed. +var cache sync.Map // name string -> value *atomic.Pointer[string] + +var empty value + +// Value returns the current value for the GODEBUG setting s. 
+// +// Value maintains an internal cache that is synchronized +// with changes to the $GODEBUG environment variable, +// making Value efficient to call as frequently as needed. +// Clients should therefore typically not attempt their own +// caching of Value's result. +func (s *Setting) Value() string { + s.once.Do(func() { + s.setting = lookup(s.Name()) + if s.info == nil && !s.Undocumented() { + panic("godebug: Value of name not listed in godebugs.All: " + s.name) + } + }) + v := *s.value.Load() + if v.bisect != nil && !v.bisect.Stack(&stderr) { + return "" + } + return v.text +} + +// lookup returns the unique *setting value for the given name. +func lookup(name string) *setting { + if v, ok := cache.Load(name); ok { + return v.(*setting) + } + s := new(setting) + s.info = godebugs.Lookup(name) + s.value.Store(&empty) + if v, loaded := cache.LoadOrStore(name, s); loaded { + // Lost race: someone else created it. Use theirs. + return v.(*setting) + } + + return s +} + +func newIncNonDefault(name string) func() { + s := New(name) + s.Value() + return s.IncNonDefault +} + +var updateMu sync.Mutex + +// update records an updated GODEBUG setting. +// def is the default GODEBUG setting for the running binary, +// and env is the current value of the $GODEBUG environment variable. +func update(def, env string) { + updateMu.Lock() + defer updateMu.Unlock() + + // Update all the cached values, creating new ones as needed. + // We parse the environment variable first, so that any settings it has + // are already locked in place (did[name] = true) before we consider + // the defaults. + did := make(map[string]bool) + parse(did, env) + parse(did, def) + + // Clear any cached values that are no longer present. + cache.Range(func(name, s any) bool { + if !did[name.(string)] { + s.(*setting).value.Store(&empty) + } + return true + }) +} + +// parse parses the GODEBUG setting string s, +// which has the form k=v,k2=v2,k3=v3. +// Later settings override earlier ones. +// Parse only updates settings k=v for which did[k] = false. +// It also sets did[k] = true for settings that it updates. +// Each value v can also have the form v#pattern, +// in which case the GODEBUG is only enabled for call stacks +// matching pattern, for use with golang.org/x/tools/cmd/bisect. +func parse(did map[string]bool, s string) { + // Scan the string backward so that later settings are used + // and earlier settings are ignored. + // Note that a forward scan would cause cached values + // to temporarily use the ignored value before being + // updated to the "correct" one. + end := len(s) + eq := -1 + for i := end - 1; i >= -1; i-- { + if i == -1 || s[i] == ',' { + if eq >= 0 { + name, arg := s[i+1:eq], s[eq+1:end] + if !did[name] { + did[name] = true + v := &value{text: arg} + for j := 0; j < len(arg); j++ { + if arg[j] == '#' { + v.text = arg[:j] + v.bisect, _ = bisect.New(arg[j+1:]) + break + } + } + lookup(name).value.Store(v) + } + } + eq = -1 + end = i + } else if s[i] == '=' { + eq = i + } + } +} + +type runtimeStderr struct{} + +var stderr runtimeStderr + +func (*runtimeStderr) Write(b []byte) (int, error) { + return len(b), nil +} diff --git a/internal/godebugs/table.go b/internal/godebugs/table.go new file mode 100644 index 00000000..d5ac707a --- /dev/null +++ b/internal/godebugs/table.go @@ -0,0 +1,78 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
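+
+// A minimal sketch of how this table is consumed by internal/godebug
+// (illustrative; the setting name must be listed in All below):
+//
+//	var lax = godebug.New("httplaxcontentlength")
+//
+//	if lax.Value() == "1" {
+//		// caller opted back into lax Content-Length parsing via GODEBUG
+//	}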
+ +// Package godebugs provides a table of known GODEBUG settings, +// for use by a variety of other packages, including internal/godebug, +// runtime, runtime/metrics, and cmd/go/internal/load. +package godebugs + +// An Info describes a single known GODEBUG setting. +type Info struct { + Name string // name of the setting ("panicnil") + Package string // package that uses the setting ("runtime") + Changed int // minor version when default changed, if any; 21 means Go 1.21 + Old string // value that restores behavior prior to Changed + Opaque bool // setting does not export information to runtime/metrics using [internal/godebug.Setting.IncNonDefault] +} + +// All is the table of known settings, sorted by Name. +// +// Note: After adding entries to this table, run 'go generate runtime/metrics' +// to update the runtime/metrics doc comment. +// (Otherwise the runtime/metrics test will fail.) +// +// Note: After adding entries to this table, update the list in doc/godebug.md as well. +// (Otherwise the test in this package will fail.) +var All = []Info{ + {Name: "execerrdot", Package: "os/exec"}, + {Name: "gocachehash", Package: "cmd/go"}, + {Name: "gocachetest", Package: "cmd/go"}, + {Name: "gocacheverify", Package: "cmd/go"}, + {Name: "gotypesalias", Package: "go/types"}, + {Name: "http2client", Package: "net/http"}, + {Name: "http2debug", Package: "net/http", Opaque: true}, + {Name: "http2server", Package: "net/http"}, + {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, + {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, + {Name: "installgoroot", Package: "go/build"}, + {Name: "jstmpllitinterp", Package: "html/template"}, + //{Name: "multipartfiles", Package: "mime/multipart"}, + {Name: "multipartmaxheaders", Package: "mime/multipart"}, + {Name: "multipartmaxparts", Package: "mime/multipart"}, + {Name: "multipathtcp", Package: "net"}, + {Name: "netdns", Package: "net", Opaque: true}, + {Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"}, + {Name: "randautoseed", Package: "math/rand"}, + {Name: "tarinsecurepath", Package: "archive/tar"}, + {Name: "tls10server", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "tlsmaxrsasize", Package: "crypto/tls"}, + {Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "winreadlinkvolume", Package: "os", Changed: 22, Old: "0"}, + {Name: "winsymlink", Package: "os", Changed: 22, Old: "0"}, + {Name: "x509sha1", Package: "crypto/x509"}, + {Name: "x509usefallbackroots", Package: "crypto/x509"}, + {Name: "x509usepolicies", Package: "crypto/x509"}, + {Name: "zipinsecurepath", Package: "archive/zip"}, +} + +// Lookup returns the Info with the given name. +func Lookup(name string) *Info { + // binary search, avoiding import of sort. 
+ lo := 0 + hi := len(All) + for lo < hi { + m := int(uint(lo+hi) >> 1) + mid := All[m].Name + if name == mid { + return &All[m] + } + if name < mid { + hi = m + } else { + lo = m + 1 + } + } + return nil +} diff --git a/internal/header/header.go b/internal/header/header.go new file mode 100644 index 00000000..4098febe --- /dev/null +++ b/internal/header/header.go @@ -0,0 +1,44 @@ +package header + +import "strings" + +const ( + DefaultUserAgent = "req/v3 (https://github.com/imroc/req)" + UserAgent = "User-Agent" + Location = "Location" + ContentType = "Content-Type" + PlainTextContentType = "text/plain; charset=utf-8" + JsonContentType = "application/json; charset=utf-8" + XmlContentType = "text/xml; charset=utf-8" + FormContentType = "application/x-www-form-urlencoded" + WwwAuthenticate = "WWW-Authenticate" + Authorization = "Authorization" + HeaderOderKey = "__header_order__" + PseudoHeaderOderKey = "__pseudo_header_order__" +) + +var reqWriteExcludeHeader = map[string]bool{ + // Host is :authority, already sent. + // Content-Length is automatic. + "host": true, + "content-length": true, + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + "connection": true, + "proxy-connection": true, + "transfer-encoding": true, + "upgrade": true, + "keep-alive": true, + // Ignore header order keys which is only used internally. + HeaderOderKey: true, + PseudoHeaderOderKey: true, +} + +func IsExcluded(key string) bool { + if reqWriteExcludeHeader[strings.ToLower(key)] { + return true + } + return false +} diff --git a/internal/header/sort.go b/internal/header/sort.go new file mode 100644 index 00000000..2c61fd2e --- /dev/null +++ b/internal/header/sort.go @@ -0,0 +1,40 @@ +package header + +import ( + "net/textproto" + "sort" +) + +type KeyValues struct { + Key string + Values []string +} + +type sorter struct { + order map[string]int + kvs []KeyValues +} + +func (s *sorter) Len() int { return len(s.kvs) } +func (s *sorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } +func (s *sorter) Less(i, j int) bool { + if index, ok := s.order[textproto.CanonicalMIMEHeaderKey(s.kvs[i].Key)]; ok { + i = index + } + if index, ok := s.order[textproto.CanonicalMIMEHeaderKey(s.kvs[j].Key)]; ok { + j = index + } + return i < j +} + +func SortKeyValues(kvs []KeyValues, orderedKeys []string) { + order := make(map[string]int) + for i, key := range orderedKeys { + order[textproto.CanonicalMIMEHeaderKey(key)] = i + } + s := &sorter{ + order: order, + kvs: kvs, + } + sort.Sort(s) +} diff --git a/internal/http2/client_conn_pool.go b/internal/http2/client_conn_pool.go new file mode 100644 index 00000000..6136871a --- /dev/null +++ b/internal/http2/client_conn_pool.go @@ -0,0 +1,281 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "context" + "errors" + "net" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + // GetClientConn returns a specific HTTP/2 connection (usually + // a TLS-TCP connection) to an HTTP/2 server. On success, the + // returned ClientConn accounts for the upcoming RoundTrip + // call, so the caller should not omit it. 
If the caller needs + // to, ClientConn.RoundTrip can be called with a bogus + // new(http.Request) to release the stream reservation. + GetClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) + MarkDead(*ClientConn) + CloseIdleConnections() + AddConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) +} + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set? + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + traceGetConn(req, addr) + const singleUse = true + cc, err := p.t.dialClientConn(req.Context(), addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + for { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.ReserveNewRequest() { + // When a connection is presented to us by the net/http package, + // the GetConn hook has already been called. + // Don't call it a second time here. + if !cc.getConnCalled { + traceGetConn(req, addr) + } + cc.getConnCalled = false + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + traceGetConn(req, addr) + call := p.getStartDialLocked(req.Context(), addr) + p.mu.Unlock() + <-call.done + if shouldRetryDial(call, req) { + continue + } + cc, err := call.res, call.err + if err != nil { + return nil, err + } + if cc.ReserveNewRequest() { + return cc, nil + } + } +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + _ incomparable + p *clientConnPool + // the context associated with the request + // that created this dialCall + ctx context.Context + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(call.ctx, addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(ctx context.Context, addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() + + close(c.done) +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. 
Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) AddConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + _ incomparable + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc net.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + cc.getConnCalled = true // already called by the net/http package + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) CloseIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// shouldRetryDial reports whether the current request should +// retry dialing after the call finished unsuccessfully, for example +// if the dial was canceled because of a context cancellation or +// deadline expiry. +func shouldRetryDial(call *dialCall, req *http.Request) bool { + if call.err == nil { + // No error, no need to retry + return false + } + if call.ctx == req.Context() { + // If the call has the same context as the request, the dial + // should not be retried, since any cancellation will have come + // from this request. 
+ return false + } + if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) { + // If the call error is not because of a context cancellation or a deadline expiry, + // the dial should not be retried. + return false + } + // Only retry if the error is a context cancellation error or deadline expiry + // and the context associated with the call was canceled or expired. + return call.ctx.Err() != nil +} diff --git a/internal/http2/databuffer.go b/internal/http2/databuffer.go new file mode 100644 index 00000000..e6f55cbd --- /dev/null +++ b/internal/http2/databuffer.go @@ -0,0 +1,149 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} + +func getDataBufferChunk(size int64) []byte { + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] + } +} + +func putDataBufferChunk(p []byte) { + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) + } +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. 
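+//
+// A small sketch of the buffer's behavior (illustrative):
+//
+//	var b dataBuffer
+//	b.Write([]byte("hello"))   // stored in a pooled 1KB chunk
+//	p := make([]byte, 8)
+//	n, _ := b.Read(p)          // n == 5, p[:n] == "hello"
+//	_, err := b.Read(p)        // err == errReadEmpty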
+func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). + want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/internal/http2/errors.go b/internal/http2/errors.go new file mode 100644 index 00000000..07bc7d6b --- /dev/null +++ b/internal/http2/errors.go @@ -0,0 +1,138 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
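+//
+// For example (illustrative):
+//
+//	_ = ErrCodeFlowControl.String() // "FLOW_CONTROL_ERROR"
+//	_ = ErrCode(0xff).String()      // "unknown error code 0xff"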
+type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +func (e ErrCode) stringToken() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: %s", ErrCode(e)) +} + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +// errFromPeer is a sentinel error value for StreamError.Cause to +// indicate that the StreamError was sent from the peer over the wire +// and wasn't locally generated in the Transport. +var errFromPeer = errors.New("received from peer") + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. 
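+//
+// A typical use inside a frame parser looks like (illustrative):
+//
+//	return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+//
+// which the Framer turns into ConnectionError(ErrCodeProtocol) while keeping the
+// reason retrievable through ErrorDetail.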
+type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value for %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/internal/http2/flow.go b/internal/http2/flow.go new file mode 100644 index 00000000..b7dbd186 --- /dev/null +++ b/internal/http2/flow.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// inflowMinRefresh is the minimum number of bytes we'll send for a +// flow control window update. +const inflowMinRefresh = 4 << 10 + +// inflow accounts for an inbound flow control window. +// It tracks both the latest window sent to the peer (used for enforcement) +// and the accumulated unsent window. +type inflow struct { + avail int32 + unsent int32 +} + +// init sets the initial window. +func (f *inflow) init(n int32) { + f.avail = n +} + +// add adds n bytes to the window, with a maximum window size of max, +// indicating that the peer can now send us more data. +// For example, the user read from a {Request,Response} body and consumed +// some of the buffered data, so the peer can now send more. +// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer. +// Window updates are accumulated and sent when the unsent capacity +// is at least inflowMinRefresh or will at least double the peer's available window. +func (f *inflow) add(n int) (connAdd int32) { + if n < 0 { + panic("negative update") + } + unsent := int64(f.unsent) + int64(n) + // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets." + // RFC 7540 Section 6.9.1. + const maxWindow = 1<<31 - 1 + if unsent+int64(f.avail) > maxWindow { + panic("flow control update exceeds maximum window size") + } + f.unsent = int32(unsent) + if f.unsent < inflowMinRefresh && f.unsent < f.avail { + // If there aren't at least inflowMinRefresh bytes of window to send, + // and this update won't at least double the window, buffer the update for later. + return 0 + } + f.avail += f.unsent + f.unsent = 0 + return int32(unsent) +} + +// take attempts to take n bytes from the peer's flow control window. +// It reports whether the window has available capacity. +func (f *inflow) take(n uint32) bool { + if n > uint32(f.avail) { + return false + } + f.avail -= int32(n) + return true +} + +// takeInflows attempts to take n bytes from two inflows, +// typically connection-level and stream-level flows. +// It reports whether both windows have available capacity. 
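+//
+// A small sketch (illustrative): the connection-level and stream-level windows are
+// debited together, and the DATA is refused if either lacks capacity.
+//
+//	var connWin, streamWin inflow
+//	connWin.init(65535)
+//	streamWin.init(65535)
+//	ok := takeInflows(&connWin, &streamWin, 1024) // true; both windows shrink by 1024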
+func takeInflows(f1, f2 *inflow, n uint32) bool { + if n > uint32(f1.avail) || n > uint32(f2.avail) { + return false + } + f1.avail -= int32(n) + f2.avail -= int32(n) + return true +} + +// outflow is the outbound flow control window's size. +type outflow struct { + _ incomparable + + // n is the number of DATA bytes we're allowed to send. + // An outflow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level outflow that is + // shared by all streams on that conn. It is nil for the outflow + // that's on the conn directly. + conn *outflow +} + +func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf } + +func (f *outflow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *outflow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *outflow) add(n int32) bool { + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true + } + return false +} diff --git a/internal/http2/frame.go b/internal/http2/frame.go new file mode 100644 index 00000000..077e181e --- /dev/null +++ b/internal/http2/frame.go @@ -0,0 +1,1736 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "net/http" + "strings" + "sync" + + "github.com/imroc/req/v3/http2" + "github.com/imroc/req/v3/internal/dump" + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. 
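+//
+// Flags combine with bitwise OR; Has reports whether all of the given bits are set
+// (illustrative):
+//
+//	f := FlagHeadersEndHeaders | FlagHeadersEndStream
+//	_ = f.Has(FlagHeadersEndHeaders)                     // true
+//	_ = f.Has(FlagHeadersEndHeaders | FlagHeadersPadded) // false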
+const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See https://httpwg.org/specs/rfc7540.html#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. 
+func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (h2f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(h2f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return errFrameTooLarge + } + _ = append(h2f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if h2f.logWrites { + h2f.logWrite() + } + + n, err := h2f.w.Write(h2f.wbuf) + if err == nil && n != len(h2f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (h2f *Framer) logWrite() { + if h2f.debugFramer == nil { + h2f.debugFramerBuf = new(bytes.Buffer) + h2f.debugFramer = NewFramer(nil, h2f.debugFramerBuf) + h2f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + h2f.debugFramer.AllowIllegalReads = true + } + h2f.debugFramerBuf.Write(h2f.wbuf) + fr, err := h2f.debugFramer.ReadFrame() + if err != nil { + h2f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", h2f) + return + } + h2f.debugWriteLoggerf("http2: Framer %p: wrote %v", h2f, summarizeFrame(fr)) +} + +func (h2f *Framer) writeByte(v byte) { h2f.wbuf = append(h2f.wbuf, v) } + +func (h2f *Framer) writeBytes(v []byte) { h2f.wbuf = append(h2f.wbuf, v...) } + +func (h2f *Framer) writeUint16(v uint16) { h2f.wbuf = append(h2f.wbuf, byte(v>>8), byte(v)) } + +func (h2f *Framer) writeUint32(v uint32) { + h2f.wbuf = append(h2f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (h2f *Framer) SetReuseFrames() { + if h2f.frameCache != nil { + return + } + h2f.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + countError: func(string) {}, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. 
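+//
+// A minimal configuration sketch (illustrative; conn is a placeholder net.Conn):
+//
+//	fr := NewFramer(conn, conn)     // frames are written to and read from conn
+//	fr.SetMaxReadFrameSize(1 << 20) // reject frames larger than 1MB
+//	fr.SetReuseFrames()             // returned Frames are valid only until the next ReadFrame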
+func (h2f *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + h2f.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (h2f *Framer) ErrorDetail() error { + return h2f.errDetail +} + +// errFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var errFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +func (h2f *Framer) streamByID(id uint32) *clientStream { + if h2f.cc == nil { + return nil + } + h2f.cc.mu.Lock() + defer h2f.cc.mu.Unlock() + cs := h2f.cc.streams[id] + if cs != nil && !cs.readAborted { + return cs + } + return nil +} + +func (h2f *Framer) currentRequest(id uint32) *http.Request { + if cs := h2f.streamByID(id); cs != nil { + if req := cs.currentRequest; req != nil { + return req + } + } + return nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is errFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. 
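+//
+// A typical read loop (illustrative; fr is a placeholder *Framer):
+//
+//	for {
+//		f, err := fr.ReadFrame()
+//		if err != nil {
+//			if terminalReadFrameError(err) {
+//				return err // connection is no longer usable
+//			}
+//			continue // e.g. a StreamError; keep reading
+//		}
+//		switch f := f.(type) {
+//		case *DataFrame:
+//			_ = f.Data()
+//		case *GoAwayFrame:
+//			return ConnectionError(f.ErrCode)
+//		}
+//	}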
+func (h2f *Framer) ReadFrame() (Frame, error) { + h2f.errDetail = nil + if h2f.lastFrame != nil { + h2f.lastFrame.invalidate() + } + fh, err := readFrameHeader(h2f.headerBuf[:], h2f.r) + if err != nil { + return nil, err + } + if fh.Length > h2f.maxReadSize { + return nil, errFrameTooLarge + } + payload := h2f.getReadBuf(fh.Length) + if _, err := io.ReadFull(h2f.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(h2f.frameCache, fh, h2f.countError, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, h2f.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := h2f.checkFrameOrder(f); err != nil { + return nil, err + } + if h2f.logReads { + h2f.debugReadLoggerf("http2: Framer %p: read %v", h2f, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && h2f.ReadMetaHeaders != nil { + hf := f.(*HeadersFrame) + req := h2f.currentRequest(hf.StreamID) + var ctx context.Context + if req != nil { + ctx = req.Context() + } + var dumps []*dump.Dumper + if h2f.cc != nil { + dumps = dump.GetDumpers(ctx, h2f.cc.t.Dump) + } + if len(dumps) > 0 { + dd := []*dump.Dumper{} + for _, dump := range dumps { + if dump.ResponseHeader() { + dd = append(dd, dump) + } + } + dumps = dd + } + hr, err := h2f.readMetaFrame(hf, dumps) + if err == nil && len(dumps) > 0 { + for _, dump := range dumps { + dump.DumpResponseHeader([]byte("\r\n")) + } + } + return hr, err + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (h2f *Framer) connError(code ErrCode, reason string) error { + h2f.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (h2f *Framer) checkFrameOrder(f Frame) error { + last := h2f.lastFrame + h2f.lastFrame = f + if h2f.AllowIllegalReads { + return nil + } + + fh := f.Header() + if h2f.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return h2f.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, h2f.lastHeaderStream)) + } + if fh.StreamID != h2f.lastHeaderStream { + return h2f.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, h2f.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return h2f.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + h2f.lastHeaderStream = 0 + } else { + h2f.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. 
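+//
+// Callers that need the bytes after the next ReadFrame must copy them first
+// (illustrative; df is a placeholder *DataFrame):
+//
+//	body := append([]byte(nil), df.Data()...)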
+func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + countError("frame_data_stream_0") + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + countError("frame_data_pad_byte_short") + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + countError("frame_data_pad_too_big") + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// writeData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (h2f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return h2f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (h2f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if err := h2f.startWriteDataPadded(streamID, endStream, data, pad); err != nil { + return err + } + return h2f.endWrite() +} + +// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer. +// The caller should call endWrite to flush the frame to the underlying writer. +func (h2f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !h2f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." 
+ return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + h2f.startWrite(FrameData, flags, streamID) + if pad != nil { + h2f.wbuf = append(h2f.wbuf, byte(len(pad))) + } + h2f.wbuf = append(h2f.wbuf, data...) + h2f.wbuf = append(h2f.wbuf, pad...) + return nil +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See https://httpwg.org/specs/rfc7540.html#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + countError("frame_settings_ack_with_length") + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + countError("frame_settings_has_stream") + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + countError("frame_settings_mod_6") + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(http2.SettingInitialWindowSize); ok && v > (1<<31)-1 { + countError("frame_settings_window_size_too_big") + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(id http2.SettingID) (v uint32, ok bool) { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true + } + } + return 0, false +} + +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) http2.Setting { + buf := f.p + return http2.Setting{ + ID: http2.SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. 
+ if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[http2.SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(http2.Setting) error) error { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { + return err + } + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (h2f *Framer) WriteSettings(settings ...http2.Setting) error { + h2f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + h2f.writeUint16(uint16(s.ID)) + h2f.writeUint32(s.Val) + } + return h2f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (h2f *Framer) WriteSettingsAck() error { + h2f.startWrite(FrameSettings, FlagSettingsAck, 0) + return h2f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { + if len(payload) != 8 { + countError("frame_ping_length") + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + countError("frame_ping_has_stream") + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (h2f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + h2f.startWrite(FramePing, flags, 0) + h2f.writeBytes(data[:]) + return h2f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. 
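+//
+// A small handling sketch (illustrative; f is a frame returned by ReadFrame):
+//
+//	if gf, ok := f.(*GoAwayFrame); ok {
+//		debug := append([]byte(nil), gf.DebugData()...) // copy before the next ReadFrame
+//		log.Printf("GOAWAY: last stream %d, code %v, debug %q", gf.LastStreamID, gf.ErrCode, debug)
+//	}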
+func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + if fh.StreamID != 0 { + countError("frame_goaway_has_stream") + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + countError("frame_goaway_short") + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (h2f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + h2f.startWrite(FrameGoAway, 0, 0) + h2f.writeUint32(maxStreamID & (1<<31 - 1)) + h2f.writeUint32(uint32(code)) + h2f.writeBytes(debugData) + return h2f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + if len(p) != 4 { + countError("frame_windowupdate_bad_len") + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + countError("frame_windowupdate_zero_inc_conn") + return nil, ConnectionError(ErrCodeProtocol) + } + countError("frame_windowupdate_zero_inc_stream") + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (h2f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !h2f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + h2f.startWrite(FrameWindowUpdate, 0, streamID) + h2f.writeUint32(incr) + return h2f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. 
+ Priority http2.PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + countError("frame_headers_zero_stream") + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + countError("frame_headers_pad_short") + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + countError("frame_headers_prio_short") + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + countError("frame_headers_prio_weight_short") + return nil, err + } + } + if len(p)-int(padLength) < 0 { + countError("frame_headers_pad_too_big") + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority http2.PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
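+//
+// Illustrative sketch (assuming h2f is an initialized *Framer and
+// blockFragment already holds an HPACK-encoded header block):
+//
+// err := h2f.WriteHeaders(HeadersFrameParam{
+// StreamID: 1,
+// BlockFragment: blockFragment,
+// EndStream: false,
+// EndHeaders: true,
+// })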
+func (h2f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + h2f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + h2f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !h2f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + h2f.writeUint32(v) + h2f.writeByte(p.Priority.Weight) + } + h2f.wbuf = append(h2f.wbuf, p.BlockFragment...) + h2f.wbuf = append(h2f.wbuf, padZeros[:p.PadLength]...) + return h2f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + http2.PriorityParam +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { + if fh.StreamID == 0 { + countError("frame_priority_zero_stream") + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + countError("frame_priority_bad_length") + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: http2.PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (h2f *Framer) WritePriority(streamID uint32, p http2.PriorityParam) error { + if !validStreamID(streamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + h2f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + h2f.writeUint32(v) + h2f.writeByte(p.Weight) + return h2f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + if len(p) != 4 { + countError("frame_rststream_bad_len") + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + countError("frame_rststream_zero_stream") + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
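+//
+// Illustrative sketch (assuming h2f is an initialized *Framer):
+// h2f.WriteRSTStream(streamID, ErrCodeProtocol) abnormally terminates the
+// given stream with the chosen error code.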
+func (h2f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + h2f.startWrite(FrameRSTStream, 0, streamID) + h2f.writeUint32(uint32(code)) + return h2f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { + if fh.StreamID == 0 { + countError("frame_continuation_zero_stream") + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (h2f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + h2f.startWrite(FrameContinuation, flags, streamID) + h2f.wbuf = append(h2f.wbuf, headerBlockFragment...) + return h2f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + countError("frame_pushpromise_zero_stream") + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. + // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + countError("frame_pushpromise_pad_short") + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + countError("frame_pushpromise_promiseid_short") + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + countError("frame_pushpromise_pad_too_big") + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. 
+type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (h2f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !h2f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + h2f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + h2f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !h2f.AllowIllegalWrites { + return errStreamID + } + h2f.writeUint32(p.PromiseID) + h2f.wbuf = append(h2f.wbuf, p.BlockFragment...) + h2f.wbuf = append(h2f.wbuf, padZeros[:p.PadLength]...) + return h2f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (h2f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + h2f.startWrite(t, flags, streamID) + h2f.writeBytes(payload) + return h2f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. 
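+//
+// For example, mh.PseudoValue("status") returns "200" for a response whose
+// :status field is 200, and "" if the field is not present.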
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 + } + return v +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (h2f *Framer) readMetaFrame(hf *HeadersFrame, dumps []*dump.Dumper) (Frame, error) { + if h2f.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + remainSize := h2f.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := h2f.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(h2f.maxHeaderStringLen()) + rawEmitFunc := func(hf hpack.HeaderField) { + if VerboseLogs && h2f.logReads { + h2f.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httpguts.ValidHeaderFieldValue(hf.Value) { + // Don't include the value in the error, because it may be sensitive. 
+ invalid = headerFieldValueError(hf.Name) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + remainSize = 0 + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + } + emitFunc := rawEmitFunc + + ds := dump.Dumpers(dumps) + if ds.ShouldDump() { + emitFunc = func(hf hpack.HeaderField) { + ds.DumpResponseHeader([]byte(fmt.Sprintf("%s: %s\r\n", hf.Name, hf.Value))) + rawEmitFunc(hf) + } + } + + hdec.SetEmitFunc(emitFunc) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. 
+ return mh, ConnectionError(ErrCodeProtocol) + } + + if _, err := hdec.Write(frag); err != nil { + return mh, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := h2f.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return mh, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + h2f.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if err := mh.checkPseudos(); err != nil { + h2f.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s http2.Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/internal/http2/gotrack.go b/internal/http2/gotrack.go new file mode 100644 index 00000000..9933c9f8 --- /dev/null +++ b/internal/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
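+// Tracking is only active when the DEBUG_HTTP2_GOROUTINES=1 environment
+// variable is set (see DebugGoroutines below); otherwise the checks are no-ops.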
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// cutoff64 returns the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+} diff --git a/internal/http2/headermap.go b/internal/http2/headermap.go new file mode 100644 index 00000000..a8e01cf9 --- /dev/null +++ b/internal/http2/headermap.go @@ -0,0 +1,106 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
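+//
+// headermap.go caches the lower-case and Go-canonical forms of common header
+// names so that per-request case conversions can avoid extra allocations.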
+ +package http2 + +import ( + "github.com/imroc/req/v3/internal/ascii" + "net/http" + "sync" +) + +var ( + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case +) + +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-credentials", + "access-control-allow-headers", + "access-control-allow-methods", + "access-control-allow-origin", + "access-control-expose-headers", + "access-control-max-age", + "access-control-request-headers", + "access-control-request-method", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "origin", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + "x-forwarded-for", + "x-forwarded-proto", + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) (lower string, isAscii bool) { + buildCommonHeaderMapsOnce() + if s, ok := commonLowerHeader[v]; ok { + return s, true + } + return ascii.ToLower(v) +} + +func canonicalHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonCanonHeader[v]; ok { + return s + } + return http.CanonicalHeaderKey(v) +} diff --git a/internal/http2/http2.go b/internal/http2/http2.go new file mode 100644 index 00000000..4d38ac69 --- /dev/null +++ b/internal/http2/http2.go @@ -0,0 +1,198 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "crypto/tls" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // https://httpwg.org/specs/rfc7540.html#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size +) + +var clientPreface = []byte(ClientPreface) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httpguts.ValidHeaderName for the base rules. 
+// +// Further, http2 says: +// +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httpguts.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +func httpCodeString(code int) string { + switch code { + case 200: + return "200" + case 404: + return "404" + } + return strconv.Itoa(code) +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + _ incomparable + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } + +func (e *httpError) Timeout() bool { return e.timeout } + +func (e *httpError) Temporary() bool { return true } + +var errH2Timeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } + +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } + +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +// incomparable is a zero-width, non-comparable type. 
Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/internal/http2/pipe.go b/internal/http2/pipe.go new file mode 100644 index 00000000..3b9f06b9 --- /dev/null +++ b/internal/http2/pipe.go @@ -0,0 +1,184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + unread int // bytes unread when done + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +// setBuffer initializes the pipe buffer. +// It has no effect if the pipe is already closed. +func (p *pipe) setBuffer(b pipeBuffer) { + p.mu.Lock() + defer p.mu.Unlock() + if p.err != nil || p.breakErr != nil { + return + } + p.b = b +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return p.unread + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil || p.breakErr != nil { + return 0, errClosedPipeWrite + } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. 
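+// (Read invokes the provided fn through its readFn hook before returning the
+// error, e.g. to copy trailers.)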
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + if p.b != nil { + p.unread += p.b.Len() + } + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/internal/http2/timer.go b/internal/http2/timer.go new file mode 100644 index 00000000..0b1c17b8 --- /dev/null +++ b/internal/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/internal/http2/trace.go b/internal/http2/trace.go new file mode 100644 index 00000000..0be4bc2a --- /dev/null +++ b/internal/http2/trace.go @@ -0,0 +1,84 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
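+//
+// trace.go contains small adapters that forward client events to the optional
+// net/http/httptrace.ClientTrace hooks attached to a request's context.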
+ +package http2 + +import ( + "net/http" + "net/http/httptrace" + "net/textproto" + "time" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + ci.Reused = reused + cc.mu.Lock() + ci.WasIdle = len(cc.streams) == 0 && reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/internal/http2/transport.go b/internal/http2/transport.go new file mode 100644 index 00000000..fea6685c --- /dev/null +++ b/internal/http2/transport.go @@ -0,0 +1,3215 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "math/bits" + mathrand "math/rand" + "net" + "net/http" + "net/http/httptrace" + "net/textproto" + "os" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + + "github.com/imroc/req/v3/http2" + "github.com/imroc/req/v3/internal/ascii" + "github.com/imroc/req/v3/internal/common" + "github.com/imroc/req/v3/internal/compress" + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/netutil" + "github.com/imroc/req/v3/internal/transport" + reqtls "github.com/imroc/req/v3/pkg/tls" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. 
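+ // (4 << 20 bytes, i.e. 4 MiB per stream.)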
+ transportDefaultStreamFlow = 4 << 20
+
+ // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
+ // it has received the server's initial SETTINGS frame, which corresponds to
+ // the spec's minimum recommended value.
+ initialMaxConcurrentStreams = 100
+
+ // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
+ // if the server doesn't include one in its initial SETTINGS frame.
+ defaultMaxConcurrentStreams = 1000
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ *transport.Options
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // StrictMaxConcurrentStreams controls whether the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+ // globally. If false, new TCP connections are created to the
+ // server as needed to keep each under the per-connection
+ // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+ // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+ // a global limit and callers of RoundTrip block when needed,
+ // waiting for their turn.
+ StrictMaxConcurrentStreams bool
+
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // Note that a ping response is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which the connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ WriteByteTimeout time.Duration
+
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
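+ // Examples from this package include "frame_ping_length" and
+ // "frame_settings_has_stream".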
+ CountError func(errType string) + + Settings []http2.Setting + + ConnectionFlow uint32 + HeaderPriority http2.PriorityParam + PriorityFrames []http2.PriorityFrame + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(ctx, d) +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually TLSConn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic + singleUse bool // whether being used for a single http.Request + getConnCalled bool // used by clientConnPool + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle + // Settings from peer: (also guarded by wmu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. + // Write to reqHeaderMu to lock it, read from it to unlock. + // Lock reqmu BEFORE mu or wmu. + reqHeaderMu chan struct{} + + // wmu is held while writing. + // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. 
+ // Only acquire both at the same time when changing peer settings. + wmu sync.Mutex + bw *bufio.Writer + fr *Framer + werr error // first write error that has occurred + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + currentRequest *http.Request + cc *ClientConn + + // Fields of Request that we may access even after the response body is closed. + ctx context.Context + reqCancel <-chan struct{} + + trace *httptrace.ClientTrace // or nil + ID uint32 + bufPipe pipe // buffered pipe with the flow-controlled response payload + requestedGzip bool + isHead bool + + abortOnce sync.Once + abort chan struct{} // closed to signal stream should end immediately + abortErr error // set if abort is closed + + peerClosed chan struct{} // closed when the peer sends an END_STREAM flag + donec chan struct{} // closed after the stream is in the closed state + on100 chan struct{} // buffered; written to if a 100 is received + + respHeaderRecv chan struct{} // closed when headers are received + res *http.Response // set if respHeaderRecv is closed + + flow outflow // guarded by cc.mu + inflow inflow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + + reqBody io.ReadCloser + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done + + // owned by writeRequest: + sentEndStream bool // sent an END_STREAM flag to the peer + sentHeaders bool + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + +func (cs *clientStream) abortStream(err error) { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) +} + +func (cs *clientStream) abortStreamLocked(err error) { + cs.abortOnce.Do(func() { + cs.abortErr = err + close(cs.abort) + }) + if cs.reqBody != nil { + cs.closeReqBodyLocked() + } + // TODO(dneil): Clean up tests where cs.cc.cond is nil. + if cs.cc.cond != nil { + // Wake up writeRequestBody if it is waiting on flow control. 
+ cs.cc.cond.Broadcast() + } +} + +func (cs *clientStream) abortRequestBodyWrite() { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + if cs.reqBody != nil && cs.reqBodyClosed == nil { + cs.closeReqBodyLocked() + cc.cond.Broadcast() + } +} + +func (cs *clientStream) closeReqBodyLocked() { + if cs.reqBodyClosed != nil { + return + } + cs.reqBodyClosed = make(chan struct{}) + reqBodyClosed := cs.reqBodyClosed + go func() { + cs.reqBody.Close() + close(reqBodyClosed) + }() +} + +type stickyErrWriter struct { + conn net.Conn + timeout time.Duration + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + for { + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) + } + nn, err := sew.conn.Write(p[n:]) + n += nn + if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { + // Keep extending the deadline so long as we're making progress. + continue + } + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Time{}) + } + *sew.err = err + return n, err + } +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// IsNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} + +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// IsNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func IsNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +func (t *Transport) RoundTripOnlyCachedConn(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + host = authority + port = "" + } + if port == "" { // authority's port was empty + port = "443" + if scheme == "http" { + port = "80" + } + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +func (t *Transport) AddConn(conn net.Conn, addr string) (used bool, err error) { + used, err = t.connPool().AddConnIfNeeded(addr, t, conn) + return +} + +// RoundTripOpt is like RoundTrip, but takes options. 
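+//
+// Illustrative sketch (assuming t is an initialized *Transport): to send a
+// request only over an already-established connection, pass
+// RoundTripOpt{OnlyCachedConn: true} and check the error with
+// IsNoCachedConnError:
+//
+// res, err := t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
+// if IsNoCachedConnError(err) {
+// // no cached connection was available; dial or fall back
+// }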
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := netutil.AuthorityAddr(req.URL.Scheme, req.URL.Host) + var cc *ClientConn + var err error + if opt.OnlyCachedConn { + cc, err = t.connPool().GetClientConn(req, addr, false) + if err != nil { + return nil, err + } + traceGotConn(req, cc, true) + return cc.RoundTrip(req) + } + for retry := 0; ; retry++ { + cc, err = t.connPool().GetClientConn(req, addr, true) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + traceGotConn(req, cc, reused) + res, err := cc.RoundTrip(req) + if err != nil && retry <= 6 { + roundTripErr := err + if req, err = shouldRetryRequest(req, err); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + d := time.Second * time.Duration(backoff) + tm := t.newTimer(d) + select { + case <-tm.C(): + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) + continue + case <-req.Context().Done(): + tm.Stop() + err = req.Context().Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + t.connPool().CloseIdleConnections() +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || req.Body == http.NoBody { + return req, nil + } + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. + if req.GetBody != nil { + body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. 
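+ // Note: errClientConnUnusable is returned before a stream is ever created
+ // (see awaitOpenSlotForStreamLocked), so the body cannot have been read yet.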
+ if err == errClientConnUnusable { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { + // See golang/go#47635, golang/go#42777 + return true + } + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if c := t.TLSClientConfig; c != nil { + *cfg = *c.Clone() + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +var zeroDialer net.Dialer + +type tlsHandshakeTimeoutError struct{} + +func (tlsHandshakeTimeoutError) Timeout() bool { return true } +func (tlsHandshakeTimeoutError) Temporary() bool { return true } +func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (reqtls.Conn, error) { + if t.TLSHandshakeContext != nil { + conn, err := zeroDialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + var firstTLSHost string + if firstTLSHost, _, err = net.SplitHostPort(addr); err != nil { + return nil, err + } + trace := httptrace.ContextClientTrace(ctx) + errc := make(chan error, 2) + var timer *time.Timer // for canceling TLS handshake + if d := t.TLSHandshakeTimeout; d != 0 { + timer = time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + } + go func() { + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + tlsCn, tlsState, err := t.TLSHandshakeContext(ctx, firstTLSHost, conn) + if err != nil { + if timer != nil { + timer.Stop() + } + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tls.ConnectionState{}, err) + } + } else { + conn = tlsCn + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(*tlsState, nil) + } + } + errc <- err + }() + if err := <-errc; err != nil { + conn.Close() + return nil, err + } else { + tlsCn := conn.(reqtls.Conn) + return tlsCn, nil + } + } else { + dialer := &tls.Dialer{ + Config: cfg, + } + conn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := conn.(reqtls.Conn) + return tlsCn, nil + } +} + +func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + if t.DialTLSContext != nil { + return func(network string, addr string, cfg *tls.Config) (net.Conn, error) { + return t.DialTLSContext(ctx, network, addr) + } + } + return func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) + if 
err != nil { + return nil, err + } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil + } +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, t.DisableKeepAlives) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + + var headerTableSize uint32 = initialHeaderTableSize + for _, setting := range t.Settings { + switch setting.ID { + case http2.SettingMaxFrameSize: + cc.maxFrameSize = setting.Val + case http2.SettingMaxHeaderListSize: + t.MaxHeaderListSize = setting.Val + case http2.SettingHeaderTableSize: + headerTableSize = setting.Val + } + } + + cc.flow.add(initialWindowSize) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{ + conn: c, + timeout: t.WriteByteTimeout, + err: &cc.werr, + }) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.cc = cc + if t.CountError != nil { + cc.fr.countError = t.CountError + } + cc.fr.ReadMetaHeaders = hpack.NewDecoder(headerTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + var initialSettings []http2.Setting + if len(t.Settings) > 0 { + initialSettings = t.Settings + } else { + initialSettings = []http2.Setting{ + {ID: http2.SettingEnablePush, Val: 0}, + {ID: http2.SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, http2.Setting{ID: http2.SettingMaxHeaderListSize, Val: max}) + } + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + connFlow := cc.t.ConnectionFlow + if connFlow < 1 { + connFlow = transportDefaultConnFlow + } + cc.fr.WriteWindowUpdate(0, connFlow) + + for _, p := range t.PriorityFrames { + cc.fr.WritePriority(p.StreamID, p.PriorityParam) + cc.nextStreamID = p.StreamID + 2 + } + + cc.inflow.init(int32(connFlow) + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + cc.Close() + return nil, cc.werr + } + + // Start the idle timer after the connection is fully initialized. 
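+ // (Presumably armed only now so that a very small IdleConnTimeout cannot
+ // close the connection while the preface and initial SETTINGS above are
+ // still being written.)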
+ if d := t.IdleConnTimeout; d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + defer cancel() + cc.vlogf("http2: Transport sending health check") + err := cc.Ping(ctx) + if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) + cc.closeForLostPing() + return + } else { + cc.vlogf("http2: Transport health check success") + } +} + +// SetDoNotReuse marks cc as not reusable for future HTTP requests. +func (cc *ClientConn) SetDoNotReuse() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.doNotReuse = true +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. + cs.abortStreamLocked(errClientConnGotGoAway) + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +// +// If the caller is going to immediately make a new request on this +// connection, use ReserveNewRequest instead. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +// ReserveNewRequest is like CanTakeNewRequest but also reserves a +// concurrent stream in cc. The reservation is decremented on the +// next call to RoundTrip. +func (cc *ClientConn) ReserveNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + if st := cc.idleStateLocked(); !st.canTakeNewRequest { + return false + } + cc.streamsReserved++ + return true +} + +// ClientConnState describes the state of a ClientConn. +type ClientConnState struct { + // Closed is whether the connection is closed. + Closed bool + + // Closing is whether the connection is in the process of + // closing. It may be closing due to shutdown, being a + // single-use connection, being marked as DoNotReuse, or + // having received a GOAWAY frame. + Closing bool + + // StreamsActive is how many streams are active. + StreamsActive int + + // StreamsReserved is how many streams have been reserved via + // ClientConn.ReserveNewRequest. 
+ StreamsReserved int + + // StreamsPending is how many requests have been sent in excess + // of the peer's advertised MaxConcurrentStreams setting and + // are waiting for other streams to complete. + StreamsPending int + + // MaxConcurrentStreams is how many concurrent streams the + // peer advertised as acceptable. Zero means no SETTINGS + // frame has been received yet. + MaxConcurrentStreams uint32 + + // LastIdle, if non-zero, is when the connection last + // transitioned to idle state. + LastIdle time.Time +} + +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { + if cc.singleUse && cc.nextStreamID > 1 { + return + } + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. + maxConcurrentOkay = true + } else { + maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + } + + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest +} + +// tooIdleLocked reports whether this connection has been sitting idle +// for too much wall time. +func (cc *ClientConn) tooIdleLocked() bool { + // The Round(0) strips the monotonic clock reading so the + // times are compared based on their wall time. We don't want + // to reuse a connection that's been sitting idle during + // VM/laptop suspend if monotonic time was also frozen. + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeConn() { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. +func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tc.NetConn(); nc != nil { + nc.Close() + } +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 || cc.streamsReserved > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? 
Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.closeConn() +} + +func (cc *ClientConn) isDoNotReuseAndIdle() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.doNotReuse && len(cc.streams) == 0 +} + +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully closes the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan struct{}) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + close(done) + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case <-done: + cc.closeConn() + return nil + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + closing := cc.closing + cc.closing = true + maxStreamID := cc.nextStreamID + cc.mu.Unlock() + if closing { + // GOAWAY sent already + return nil + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + // Send a graceful shutdown frame to server + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + return nil +} + +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *ClientConn) closeForError(err error) { + cc.mu.Lock() + cc.closed = true + for _, cs := range cc.streams { + cs.abortStreamLocked(err) + } + cc.cond.Broadcast() + cc.mu.Unlock() + cc.closeConn() +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + cc.closeForError(err) + return nil +} + +// closes the client connection immediately. In-flight requests are interrupted. +func (cc *ClientConn) closeForLostPing() { + err := errors.New("http2: client connection lost") + if f := cc.t.CountError; f != nil { + f("conn_close_lost_ping") + } + cc.closeForError(err) +} + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = canonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + return cc.t.ResponseHeaderTimeout +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
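+// For example, `Connection: upgrade` or `Transfer-Encoding: gzip` is rejected,
+// while `Connection: close`, `Connection: keep-alive` and
+// `Transfer-Encoding: chunked` are accepted here and simply not sent on the
+// wire, since HTTP/2 has its own connection management and framing.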
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !ascii.EqualFold(vv[0], "close") && !ascii.EqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) decrStreamReservations() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.decrStreamReservationsLocked() +} + +func (cc *ClientConn) decrStreamReservationsLocked() { + if cc.streamsReserved > 0 { + cc.streamsReserved-- + } +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { + if cc.t != nil && cc.t.Debugf != nil { + cc.t.Debugf("HTTP/2 %s %s", req.Method, req.URL.String()) + } + ctx := req.Context() + cs := &clientStream{ + currentRequest: req, + cc: cc, + ctx: ctx, + reqCancel: req.Cancel, + isHead: req.Method == "HEAD", + reqBody: req.Body, + reqBodyContentLength: actualContentLength(req), + trace: httptrace.ContextClientTrace(ctx), + peerClosed: make(chan struct{}), + abort: make(chan struct{}), + respHeaderRecv: make(chan struct{}), + donec: make(chan struct{}), + } + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.DisableCompression && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + go cs.doRequest(req, streamf) + + waitDone := func() error { + select { + case <-cs.donec: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return common.ErrRequestCanceled + } + } + + handleResponseHeaders := func() (*http.Response, error) { + res := cs.res + if res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
+ cs.abortRequestBodyWrite() + } + res.Request = req + res.TLS = cc.tlsState + if res.Body == noBody && actualContentLength(req) == 0 { + // If there isn't a request or response body still being + // written, then wait for the stream to be closed before + // RoundTrip returns. + if err := waitDone(); err != nil { + return nil, err + } + } + return res, nil + } + + cancelRequest := func(cs *clientStream, err error) error { + cs.cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. + // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed + } + return err + } + + for { + select { + case <-cs.respHeaderRecv: + return handleResponseHeaders() + case <-cs.abort: + select { + case <-cs.respHeaderRecv: + // If both cs.respHeaderRecv and cs.abort are signaling, + // pick respHeaderRecv. The server probably wrote the + // response and immediately reset the stream. + // golang.org/issue/49645 + return handleResponseHeaders() + default: + waitDone() + return nil, cs.abortErr + } + case <-ctx.Done(): + err := ctx.Err() + cs.abortStream(err) + return nil, cancelRequest(cs, err) + case <-cs.reqCancel: + cs.abortStream(common.ErrRequestCanceled) + return nil, cancelRequest(cs, common.ErrRequestCanceled) + } + } +} + +// doRequest runs for the duration of the request lifetime. +// +// It sends the request and performs post-request cleanup (closing Request.Body, etc.). +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + err := cs.writeRequest(req, streamf) + cs.cleanupWriteRequest(err) +} + +// writeRequest sends a request. +// +// It returns nil after the request is written, the response read, +// and the request stream is half-closed by the peer. +// +// It returns non-nil if the request ends otherwise. +// If the returned error is StreamError, the error Code may be used in resetting the stream. +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { + cc := cs.cc + ctx := cs.ctx + + if err := checkConnHeaders(req); err != nil { + return err + } + + // Acquire the new-request lock by writing to reqHeaderMu. + // This lock guards the critical section covering allocating a new stream ID + // (requires mu) and creating the stream (requires wmu). 
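+ // reqHeaderMu is a 1-buffered channel used as a mutex (see newClientConn)
+ // rather than a sync.Mutex, so the acquisition below can be select-ed
+ // against request cancellation and context expiry.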
+ if cc.reqHeaderMu == nil { + panic("RoundTrip on uninitialized ClientConn") // for tests + } + select { + case cc.reqHeaderMu <- struct{}{}: + case <-cs.reqCancel: + return common.ErrRequestCanceled + case <-ctx.Done(): + return ctx.Err() + } + + cc.mu.Lock() + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + cc.decrStreamReservationsLocked() + if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil { + cc.mu.Unlock() + <-cc.reqHeaderMu + return err + } + cc.addStreamLocked(cs) // assigns stream ID + if isConnectionCloseRequest(req) { + cc.doNotReuse = true + } + cc.mu.Unlock() + + if streamf != nil { + streamf(cs) + } + + continueTimeout := cc.t.ExpectContinueTimeout + if continueTimeout != 0 { + if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { + continueTimeout = 0 + } else { + cs.on100 = make(chan struct{}, 1) + } + } + + var dumps []*dump.Dumper + if t := cs.cc.t; t != nil { + dumps = dump.GetDumpers(req.Context(), t.Dump) + } + + // Past this point (where we send request headers), it is possible for + // RoundTrip to return successfully. Since the RoundTrip contract permits + // the caller to "mutate or reuse" the Request after closing the Response's Body, + // we must take care when referencing the Request from here on. + err = cs.encodeAndWriteHeaders(req, dumps) + <-cc.reqHeaderMu + if err != nil { + return err + } + + bodyDumps := []*dump.Dumper{} + for _, dump := range dumps { + if dump.RequestBody() { + bodyDumps = append(bodyDumps, dump) + } + } + + hasBody := cs.reqBodyContentLength != 0 + if !hasBody { + cs.sentEndStream = true + } else { + if continueTimeout != 0 { + traceWait100Continue(cs.trace) + timer := time.NewTimer(continueTimeout) + select { + case <-timer.C: + err = nil + case <-cs.on100: + err = nil + case <-cs.abort: + err = cs.abortErr + case <-ctx.Done(): + err = ctx.Err() + case <-cs.reqCancel: + err = common.ErrRequestCanceled + } + timer.Stop() + if err != nil { + traceWroteRequest(cs.trace, err) + return err + } + } + if err = cs.writeRequestBody(req, bodyDumps); err != nil { + if err != errStopReqBodyWrite { + traceWroteRequest(cs.trace, err) + return err + } + } else { + cs.sentEndStream = true + for _, dump := range bodyDumps { + dump.DumpDefault([]byte("\r\n\r\n")) + } + } + } + + traceWroteRequest(cs.trace, err) + + var respHeaderTimer <-chan time.Time + var respHeaderRecv chan struct{} + if d := cc.responseHeaderTimeout(); d != 0 { + timer := cc.t.newTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C() + respHeaderRecv = cs.respHeaderRecv + } + // Wait until the peer half-closes its end of the stream, + // or until the request is aborted (via context, error, or otherwise), + // whichever comes first. + for { + select { + case <-cs.peerClosed: + return nil + case <-respHeaderTimer: + return errH2Timeout + case <-respHeaderRecv: + respHeaderRecv = nil + respHeaderTimer = nil // keep waiting for END_STREAM + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return common.ErrRequestCanceled + } + } +} + +func (cs *clientStream) encodeAndWriteHeaders(req *http.Request, dumps []*dump.Dumper) error { + cc := cs.cc + ctx := cs.ctx + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // If the request was canceled while waiting for cc.mu, just quit. + select { + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return common.ErrRequestCanceled + default: + } + + // Encode headers. 
+ // + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return err + } + hasTrailers := trailers != "" + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen, dumps) + if err != nil { + return err + } + + // Write the request. + endStream := !hasBody && !hasTrailers + cs.sentHeaders = true + err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + traceWroteHeaders(cs.trace) + return err +} + +// cleanupWriteRequest performs post-request tasks. +// +// If err (the result of writeRequest) is non-nil and the stream is not closed, +// cleanupWriteRequest will send a reset to the peer. +func (cs *clientStream) cleanupWriteRequest(err error) { + cc := cs.cc + + if cs.ID == 0 { + // We were canceled before creating the stream, so return our reservation. + cc.decrStreamReservations() + } + + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cc.mu.Lock() + mustCloseBody := false + if cs.reqBody != nil && cs.reqBodyClosed == nil { + mustCloseBody = true + cs.reqBodyClosed = make(chan struct{}) + } + bodyClosed := cs.reqBodyClosed + cc.mu.Unlock() + if mustCloseBody { + cs.reqBody.Close() + close(bodyClosed) + } + if bodyClosed != nil { + <-bodyClosed + } + + if err != nil && cs.sentEndStream { + // If the connection is closed immediately after the response is read, + // we may be aborted before finishing up here. If the stream was closed + // cleanly on both sides, there is no error. + select { + case <-cs.peerClosed: + err = nil + default: + } + } + if err != nil { + cs.abortStream(err) // possibly redundant, but harmless + if cs.sentHeaders { + if se, ok := err.(StreamError); ok { + if se.Cause != errFromPeer { + cc.writeStreamReset(cs.ID, se.Code, err) + } + } else { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + } + } + cs.bufPipe.CloseWithError(err) // no-op if already closed + } else { + if cs.sentHeaders && !cs.sentEndStream { + cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + } + cs.bufPipe.CloseWithError(common.ErrRequestCanceled) + } + if cs.ID != 0 { + cc.forgetStreamID(cs.ID) + } + + cc.wmu.Lock() + werr := cc.werr + cc.wmu.Unlock() + if werr != nil { + cc.Close() + } + + close(cs.donec) +} + +// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. 
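+// While waiting it bumps cc.pendingRequests and blocks on cc.cond, so that
+// idleStateLocked can factor queued requests into its stream-ID exhaustion
+// check.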
+func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + cc.lastIdle = time.Time{} + if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + return nil + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + select { + case <-cs.abort: + return cs.abortErr + default: + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + Priority: cc.t.HeaderPriority, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") + + errReqBodyTooLong = errors.New("http2: request body larger than specified content length") +) + +// frameScratchBufferLen returns the length of a buffer to use for +// outgoing request bodies to read/write to/from. +// +// It returns max(1, min(peer's advertised max frame size, +// Request.ContentLength+1, 512KB)). +func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { + const max = 512 << 10 + n := int64(maxFrameSize) + if n > max { + n = max + } + if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n { + // Add an extra byte past the declared content-length to + // give the caller's Request.Body io.textprotoReader a chance to + // give us more bytes than they declared, so we can catch it + // early. + n = cl + 1 + } + if n < 1 { + return 1 + } + return int(n) // doesn't truncate; max is 512K +} + +// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running +// streaming requests using small frame sizes occupy large buffers initially allocated for prior +// requests needing big buffers. The size ranges are as follows: +// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB], +// {256 KB, 512 KB], {512 KB, infinity} +// In practice, the maximum scratch buffer size should not exceed 512 KB due to +// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used. +// It exists mainly as a safety measure, for potential future increases in max buffer size. 
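+// As an illustration of the mapping implemented by bufPoolIndex below:
+// bufPoolIndex(16<<10) == 0, bufPoolIndex(64<<10) == 2,
+// bufPoolIndex(512<<10) == 5, and anything larger lands in the last pool.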
+var bufPools [7]sync.Pool // of *[]byte +func bufPoolIndex(size int) int { + if size <= 16384 { + return 0 + } + size -= 1 + bits := bits.Len(uint(size)) + index := bits - 14 + if index >= len(bufPools) { + return len(bufPools) - 1 + } + return index +} + +func (cs *clientStream) writeRequestBody(req *http.Request, dumps []*dump.Dumper) (err error) { + cc := cs.cc + body := cs.reqBody + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + + hasTrailers := req.Trailer != nil + remainLen := cs.reqBodyContentLength + hasContentLen := remainLen != -1 + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + // Scratch buffer for reading into & writing from. + scratchLen := cs.frameScratchBufferLen(maxFrameSize) + var buf []byte + index := bufPoolIndex(scratchLen) + if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPools[index].Put(bp) + buf = *bp + } else { + buf = make([]byte, scratchLen) + defer bufPools[index].Put(&buf) + } + + writeData := cc.fr.WriteData + if len(dumps) > 0 { + writeData = func(streamID uint32, endStream bool, data []byte) error { + for _, dump := range dumps { + dump.DumpRequestBody(data) + } + return cc.fr.WriteData(streamID, endStream, data) + } + } + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf[:]) + if hasContentLen { + remainLen -= int64(n) + if remainLen == 0 && err == nil { + // The request body's Content-Length was predeclared and + // we just finished reading it all, but the underlying io.textprotoReader + // returned the final chunk with a nil error (which is one of + // the two valid things a textprotoReader can do at EOF). Because we'd prefer + // to send the END_STREAM bit early, double-check that we're actually + // at EOF. Subsequent reads should return (0, EOF) at this point. + // If either value is different, we return an error in one of two ways below. + var scratch [1]byte + var n1 int + n1, err = body.Read(scratch[:]) + remainLen -= int64(n1) + } + if remainLen < 0 { + err = errReqBodyTooLong + return err + } + } + if err != nil { + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed != nil + cc.mu.Unlock() + switch { + case bodyClosed: + return errStopReqBodyWrite + case err == io.EOF: + sawEOF = true + err = nil + default: + return err + } + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + if err != nil { + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = writeData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + // Since the RoundTrip contract permits the caller to "mutate or reuse" + // a request after the Response's Body is closed, verify that this hasn't + // happened before accessing the trailers. 
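+ // Both req.Trailer and the stream's abort state are read under cc.mu below,
+ // so trailers are never encoded for a stream that has already been aborted.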
+ cc.mu.Lock() + trailer := req.Trailer + err = cs.abortErr + cc.mu.Unlock() + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + var trls []byte + if len(trailer) > 0 { + trls, err = cc.encodeTrailers(trailer, dumps) + if err != nil { + return err + } + } + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + ctx := cs.ctx + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.reqBodyClosed != nil { + return 0, errStopReqBodyWrite + } + select { + case <-cs.abort: + return 0, cs.abortErr + case <-ctx.Done(): + return 0, ctx.Err() + case <-cs.reqCancel: + return 0, common.ErrRequestCanceled + default: + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +var errNilRequestURL = errors.New("http2: Request.URI is nil") + +// requires cc.wmu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64, dumps []*dump.Dumper) ([]byte, error) { + cc.hbuf.Reset() + if req.URL == nil { + return nil, errNilRequestURL + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return nil, err + } + if !httpguts.ValidHostHeader(host) { + return nil, errors.New("http2: invalid Host header") + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) + } + + enumerateHeaders := func(f func(name, value string)) { + var writeHeader func(name string, value ...string) + var kvs []header.KeyValues + sort := false + + if req.Header != nil && len(req.Header[header.PseudoHeaderOderKey]) > 0 { + writeHeader = func(name string, value ...string) { + kvs = append(kvs, header.KeyValues{ + Key: name, + Values: value, + }) + } + sort = true + } else { + writeHeader = func(name string, value ...string) { + for _, v := range value { + f(name, v) + } + } + } + + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + writeHeader(":authority", host) + m := req.Method + if m == "" { + m = http.MethodGet + } + writeHeader(":method", m) + if req.Method != "CONNECT" { + writeHeader(":path", path) + writeHeader(":scheme", req.URL.Scheme) + } + if sort { + header.SortKeyValues(kvs, req.Header[header.PseudoHeaderOderKey]) + for _, kv := range kvs { + for _, v := range kv.Values { + f(kv.Key, v) + } + } + } + + if req.Header != nil && len(req.Header[header.HeaderOderKey]) > 0 { + sort = true + kvs = nil + writeHeader = func(name string, value ...string) { + kvs = append(kvs, header.KeyValues{ + Key: name, + Values: value, + }) + } + } else { + sort = false + writeHeader = func(name string, value ...string) { + for _, v := range value { + f(name, v) + } + } + } + + if trailers != "" { + writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if header.IsExcluded(k) { + continue + } else if ascii.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if ascii.EqualFold(k, "cookie") { + var vals []string + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + vals = append(vals, v[:p]) + // writeHeader("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + vals = append(vals, v) + // writeHeader("cookie", v) + } + } + writeHeader("cookie", vals...) + continue + } + + writeHeader(k, vv...) + } + if shouldSendReqContentLength(req.Method, contentLength) { + writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + writeHeader("accept-encoding", "gzip") + } + if !didUA { + writeHeader("user-agent", header.DefaultUserAgent) + } + + if sort { + header.SortKeyValues(kvs, req.Header[header.HeaderOderKey]) + for _, kv := range kvs { + for _, v := range kv.Values { + f(kv.Key, v) + } + } + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. 
This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + writeHeader := cc.writeHeader + headerDumps := []*dump.Dumper{} + if len(dumps) > 0 { + for _, dump := range dumps { + if dump.RequestHeader() { + headerDumps = append(headerDumps, dump) + } + } + if len(headerDumps) > 0 { + writeHeader = func(name, value string) { + for _, dump := range headerDumps { + dump.DumpRequestHeader([]byte(fmt.Sprintf("%s: %s\r\n", name, value))) + } + cc.writeHeader(name, value) + } + } + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := lowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + + for _, dump := range headerDumps { + dump.DumpRequestHeader([]byte("\r\n")) + } + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.wmu be held. +func (cc *ClientConn) encodeTrailers(trailer http.Header, dumps []*dump.Dumper) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + writeHeader := cc.writeHeader + if len(dumps) > 0 { + writeHeader = func(name, value string) { + for _, dump := range dumps { + dump.DumpRequestHeader([]byte(fmt.Sprintf("%s: %s\r\n", name, value))) + } + cc.writeHeader(name, value) + } + } + + for k, vv := range trailer { + lowKey, ascii := lowerHeader(k) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + continue + } + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + for _, v := range vv { + writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + _ incomparable + res *http.Response + err error +} + +// requires cc.mu be held. 
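+// It assigns the next odd (client-initiated) stream ID to cs, registers the
+// stream in cc.streams, and seeds its send window from the server's initial
+// window size.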
+func (cc *ClientConn) addStreamLocked(cs *clientStream) { + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.init(transportDefaultStreamFlow) + cs.ID = cc.nextStreamID + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + if cs.ID == 0 { + panic("assigned stream ID 0") + } +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.mu.Lock() + slen := len(cc.streams) + delete(cc.streams, id) + if len(cc.streams) != slen-1 { + panic("forgetting unknown stream id") + } + cc.lastActive = time.Now() + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + cc.lastIdle = time.Now() + } + // Wake up writeRequestBody via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.DisableKeepAlives || cc.goAway != nil + if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) + } + cc.closed = true + defer cc.closeConn() + } + + cc.mu.Unlock() +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + _ incomparable + cc *ClientConn +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. + err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + cc.closed = true + + for _, cs := range cc.streams { + select { + case <-cs.peerClosed: + // The server closed the stream before closing the conn, + // so no need to interrupt it. + default: + cs.abortStreamLocked(err) + } + } + cc.cond.Broadcast() + cc.mu.Unlock() +} + +// countReadFrameError calls Transport.CountError with a string +// representing err. 
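+// The emitted tokens are "read_frame_eof", "read_frame_unexpected_eof",
+// "read_frame_too_large", "read_frame_conn_error_<code>" and, as a catch-all,
+// "read_frame_other".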
+func (cc *ClientConn) countReadFrameError(err error) { + f := cc.t.CountError + if f == nil || err == nil { + return + } + if ce, ok := err.(ConnectionError); ok { + errCode := ErrCode(ce) + f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken())) + return + } + if errors.Is(err, io.EOF) { + f("read_frame_eof") + return + } + if errors.Is(err, io.ErrUnexpectedEOF) { + f("read_frame_unexpected_eof") + return + } + if errors.Is(err, errFrameTooLarge) { + f("read_frame_too_large") + return + } + f("read_frame_other") +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t timer + if readIdleTimeout != 0 { + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + } + for { + f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := rl.streamByID(se.StreamID); cs != nil { + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + cc.countReadFrameError(err) + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + case *DataFrame: + err = rl.processData(f) + case *GoAwayFrame: + err = rl.processGoAway(f) + case *RSTStreamFrame: + err = rl.processResetStream(f) + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cs := rl.streamByID(f.StreamID) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if cs.readClosed { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: errors.New("protocol error: headers after END_STREAM"), + }) + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: err, + }) + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. 
+ return nil + } + cs.resTrailer = &res.Trailer + cs.res = res + close(cs.respHeaderRecv) + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 1xx responses). +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + regularFields := f.RegularFields() + strs := make([]string, len(regularFields)) + header := make(http.Header, len(regularFields)) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range regularFields { + key := canonicalHeader(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[canonicalHeader(v)] = nil + }) + } else { + vv := header[key] + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. + vv, strs = strs[:1:1], strs[1:] + vv[0] = hf.Value + header[key] = vv + } else { + header[key] = append(vv, hf.Value) + } + } + } + + if statusCode >= 100 && statusCode <= 199 { + if f.StreamEnded() { + return nil, errors.New("1xx informational response with END_STREAM flag") + } + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + select { + case cs.on100 <- struct{}{}: + default: + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { + res.ContentLength = int64(cl) + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } else if f.StreamEnded() && !cs.isHead { + res.ContentLength = 0 + } + + if cs.isHead { + res.Body = noBody + return res, nil + } + + if f.StreamEnded() { + if res.ContentLength > 0 { + res.Body = missingBody{} + } else { + res.Body = noBody + } + return res, nil + } + + cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength}) + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + + if cs.requestedGzip && ascii.EqualFold(res.Header.Get("Content-Encoding"), "gzip") { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = compress.NewGzipReader(res.Body) + res.Uncompressed = true + } else if cs.cc.t.AutoDecompression { + contentEncoding := res.Header.Get("Content-Encoding") + if contentEncoding != "" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Uncompressed = true + res.Body = compress.NewCompressReader(res.Body, contentEncoding) + } + } + + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := canonicalHeader(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cs.abortStream(err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + connAdd := cc.inflow.add(n) + var streamAdd int32 + if err == nil { // No need to refresh if the stream is over or failed. + streamAdd = cs.inflow.add(n) + } + cc.mu.Unlock() + + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + + unread := cs.bufPipe.Len() + if unread > 0 { + cc.mu.Lock() + // Return connection-level flow control. 
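+		// The unread bytes were already charged against the connection-level
+		// receive window when they arrived, so they are credited back here.
+		// No stream-level refund is needed: the stream is being torn down.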
+ connAdd := cc.inflow.add(unread) + cc.mu.Unlock() + + // TODO(dneil): Acquiring this mutex can block indefinitely. + // Move flow control return to a goroutine? + cc.wmu.Lock() + // Return connection-level flow control. + if connAdd > 0 { + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + + select { + case <-cs.donec: + case <-cs.ctx.Done(): + // See golang/go#49366: The net/http package can cancel the + // request context after the response body is fully read. + // Don't treat this as an error. + return nil + case <-cs.reqCancel: + return common.ErrRequestCanceled + } + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := rl.streamByID(f.StreamID) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + ok := cc.inflow.take(f.Length) + connAdd := cc.inflow.add(int(f.Length)) + cc.mu.Unlock() + if !ok { + return ConnectionError(ErrCodeFlowControl) + } + if connAdd > 0 { + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) + cc.bw.Flush() + cc.wmu.Unlock() + } + } + return nil + } + if cs.readClosed { + cc.logf("protocol error: received DATA after END_STREAM") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if !cs.pastHeaders { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.isHead && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + + didReset := false + var err error + if len(data) > 0 { + if _, err = cs.bufPipe.Write(data); err != nil { + // Return len(data) now if the stream is already closed, + // since data will never be read. 
+ didReset = true + refund += len(data) + } + } + + sendConn := cc.inflow.add(refund) + var sendStream int32 + if !didReset { + sendStream = cs.inflow.add(refund) + } + cc.mu.Unlock() + + if sendConn > 0 || sendStream > 0 { + cc.wmu.Lock() + if sendConn > 0 { + cc.fr.WriteWindowUpdate(0, uint32(sendConn)) + } + if sendStream > 0 { + cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + + if err != nil { + rl.endStreamError(cs, err) + return nil + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + if !cs.readClosed { + cs.readClosed = true + // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a + // race condition: The caller can read io.EOF from Response.Body + // and close the body before we close cs.peerClosed, causing + // cleanupWriteRequest to send a RST_STREAM. + rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) + close(cs.peerClosed) + } +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + cs.readAborted = true + cs.abortStream(err) +} + +func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { + rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs := rl.cc.streams[id] + if cs != nil && !cs.readAborted { + return cs + } + return nil +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + if fn := cc.t.CountError; fn != nil { + fn("recv_goaway_" + f.ErrCode.stringToken()) + } + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + // Locking both mu and wmu here allows frame encoding to read settings with only wmu held. + // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless. + cc.wmu.Lock() + defer cc.wmu.Unlock() + + if err := rl.processSettingsNoWrite(f); err != nil { + return err + } + if !f.IsAck() { + cc.fr.WriteSettingsAck() + cc.bw.Flush() + } + return nil +} + +func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + var seenMaxConcurrentStreams bool + err := f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case http2.SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + seenMaxConcurrentStreams = true + case http2.SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case http2.SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. 
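+			// Lowering the value is also legal: e.g. dropping from the default
+			// 65535 to 16384 shrinks every open stream's send window by 49151,
+			// possibly below zero, until WINDOW_UPDATE frames arrive
+			// (Section 6.9.2).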
+ if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + if !cc.seenSettings { + if !seenMaxConcurrentStreams { + // This was the servers initial SETTINGS frame and it + // didn't contain a MAX_CONCURRENT_STREAMS field so + // increase the number of concurrent streams this + // connection can establish to our default. + cc.maxConcurrentStreams = defaultMaxConcurrentStreams + } + cc.seenSettings = true + } + + return nil +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := rl.streamByID(f.StreamID) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.streamByID(f.StreamID) + if cs == nil { + // TODO: return error if server tries to RST_STREAM an idle stream + return nil + } + serr := streamError(cs.ID, f.ErrCode) + serr.Cause = errFromPeer + if f.ErrCode == ErrCodeProtocol { + rl.cc.SetDoNotReuse() + } + if fn := cs.cc.t.CountError; fn != nil { + fn("recv_rststream_" + f.ErrCode.stringToken()) + } + cs.abortStream(serr) + + cs.bufPipe.CloseWithError(serr) + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + var pingError error + errc := make(chan struct{}) + go func() { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) + return + } + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) + return + } + }() + select { + case <-c: + return nil + case <-errc: + return pingError + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. 
+ // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = noBodyReader{} + +type noBodyReader struct{} + +func (noBodyReader) Close() error { return nil } +func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } + +type missingBody struct{} + +func (missingBody) Close() error { return nil } + +func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTripErr() error { return rt.err } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { + return nil, rt.err +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.Transport.RoundTrip(req) + if IsNoCachedConnError(err) { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/internal/http3/body.go b/internal/http3/body.go new file mode 100644 index 00000000..fa023ce4 --- /dev/null +++ b/internal/http3/body.go @@ -0,0 +1,134 @@ +package http3 + +import ( + "context" + "errors" + "io" + + "github.com/quic-go/quic-go" +) + +// A Hijacker allows hijacking of the stream creating part of a quic.Session from a http.Response.Body. +// It is used by WebTransport to create WebTransport streams after a session has been established. 
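+// A WebTransport client would obtain the Connection from the response body,
+// roughly (sketch only; whether a given body value implements Hijacker depends
+// on how it was constructed):
+//
+//	if hj, ok := rsp.Body.(Hijacker); ok {
+//		_ = hj.Connection()
+//	}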
+type Hijacker interface { + Connection() Connection +} + +var errTooMuchData = errors.New("peer sent too much data") + +// The body is used in the requestBody (for a http.Request) and the responseBody (for a http.Response). +type body struct { + str *stream + + remainingContentLength int64 + violatedContentLength bool + hasContentLength bool +} + +func newBody(str *stream, contentLength int64) *body { + b := &body{str: str} + if contentLength >= 0 { + b.hasContentLength = true + b.remainingContentLength = contentLength + } + return b +} + +func (r *body) StreamID() quic.StreamID { return r.str.StreamID() } + +func (r *body) checkContentLengthViolation() error { + if !r.hasContentLength { + return nil + } + if r.remainingContentLength < 0 || r.remainingContentLength == 0 && r.str.hasMoreData() { + if !r.violatedContentLength { + r.str.CancelRead(quic.StreamErrorCode(ErrCodeMessageError)) + r.str.CancelWrite(quic.StreamErrorCode(ErrCodeMessageError)) + r.violatedContentLength = true + } + return errTooMuchData + } + return nil +} + +func (r *body) Read(b []byte) (int, error) { + if err := r.checkContentLengthViolation(); err != nil { + return 0, err + } + if r.hasContentLength { + b = b[:min(int64(len(b)), r.remainingContentLength)] + } + n, err := r.str.Read(b) + r.remainingContentLength -= int64(n) + if err := r.checkContentLengthViolation(); err != nil { + return n, err + } + return n, maybeReplaceError(err) +} + +func (r *body) Close() error { + r.str.CancelRead(quic.StreamErrorCode(ErrCodeRequestCanceled)) + return nil +} + +type requestBody struct { + body + connCtx context.Context + rcvdSettings <-chan struct{} + getSettings func() *Settings +} + +var _ io.ReadCloser = &requestBody{} + +func newRequestBody(str *stream, contentLength int64, connCtx context.Context, rcvdSettings <-chan struct{}, getSettings func() *Settings) *requestBody { + return &requestBody{ + body: *newBody(str, contentLength), + connCtx: connCtx, + rcvdSettings: rcvdSettings, + getSettings: getSettings, + } +} + +type hijackableBody struct { + body body + + // only set for the http.Response + // The channel is closed when the user is done with this response: + // either when Read() errors, or when Close() is called. + reqDone chan<- struct{} + reqDoneClosed bool +} + +var _ io.ReadCloser = &hijackableBody{} + +func newResponseBody(str *stream, contentLength int64, done chan<- struct{}) *hijackableBody { + return &hijackableBody{ + body: *newBody(str, contentLength), + reqDone: done, + } +} + +func (r *hijackableBody) Read(b []byte) (int, error) { + n, err := r.body.Read(b) + if err != nil { + r.requestDone() + } + return n, maybeReplaceError(err) +} + +func (r *hijackableBody) requestDone() { + if r.reqDoneClosed || r.reqDone == nil { + return + } + if r.reqDone != nil { + close(r.reqDone) + } + r.reqDoneClosed = true +} + +func (r *hijackableBody) Close() error { + r.requestDone() + // If the EOF was read, CancelRead() is a no-op. 
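+	// Otherwise it tells the peer that we are no longer interested in the
+	// remainder of the body, so it can stop sending.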
+ r.body.str.CancelRead(quic.StreamErrorCode(ErrCodeRequestCanceled)) + return nil +} diff --git a/internal/http3/client.go b/internal/http3/client.go new file mode 100644 index 00000000..f9341488 --- /dev/null +++ b/internal/http3/client.go @@ -0,0 +1,324 @@ +package http3 + +import ( + "context" + "errors" + "io" + "net/http" + "net/http/httptrace" + "net/textproto" + "sync" + "time" + + "github.com/quic-go/qpack" + "github.com/quic-go/quic-go" + + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/quic-go/quicvarint" + "github.com/imroc/req/v3/internal/transport" +) + +const ( + // MethodGet0RTT allows a GET request to be sent using 0-RTT. + // Note that 0-RTT doesn't provide replay protection and should only be used for idempotent requests. + MethodGet0RTT = "GET_0RTT" + // MethodHead0RTT allows a HEAD request to be sent using 0-RTT. + // Note that 0-RTT doesn't provide replay protection and should only be used for idempotent requests. + MethodHead0RTT = "HEAD_0RTT" +) + +const ( + defaultMaxResponseHeaderBytes = 10 * 1 << 20 // 10 MB +) + +var defaultQuicConfig = &quic.Config{ + MaxIncomingStreams: -1, // don't allow the server to create bidirectional streams + KeepAlivePeriod: 10 * time.Second, +} + +// SingleDestinationRoundTripper is an HTTP/3 client doing requests to a single remote server. +type SingleDestinationRoundTripper struct { + *transport.Options + + Connection quic.Connection + + // Enable support for HTTP/3 datagrams (RFC 9297). + // If a QUICConfig is set, datagram support also needs to be enabled on the QUIC layer by setting EnableDatagrams. + EnableDatagrams bool + + // Additional HTTP/3 settings. + // It is invalid to specify any settings defined by RFC 9114 (HTTP/3) and RFC 9297 (HTTP Datagrams). + AdditionalSettings map[uint64]uint64 + StreamHijacker func(FrameType, quic.ConnectionTracingID, quic.Stream, error) (hijacked bool, err error) + UniStreamHijacker func(ServerStreamType, quic.ConnectionTracingID, quic.ReceiveStream, error) (hijacked bool) + + initOnce sync.Once + hconn *connection + requestWriter *requestWriter + decoder *qpack.Decoder +} + +var _ http.RoundTripper = &SingleDestinationRoundTripper{} + +func (c *SingleDestinationRoundTripper) Start() Connection { + c.initOnce.Do(func() { c.init() }) + return c.hconn +} + +func (c *SingleDestinationRoundTripper) init() { + c.decoder = qpack.NewDecoder(func(hf qpack.HeaderField) {}) + c.requestWriter = newRequestWriter() + c.hconn = newConnection( + c.Connection.Context(), + c.Connection, + c.EnableDatagrams, + PerspectiveClient, + 0, + c.Options, + ) + // send the SETTINGs frame, using 0-RTT data, if possible + go func() { + if err := c.setupConn(c.hconn); err != nil { + if c.Debugf != nil { + c.Debugf("Setting up connection failed: %s", err.Error()) + } + c.hconn.CloseWithError(quic.ApplicationErrorCode(ErrCodeInternalError), "") + } + }() + if c.StreamHijacker != nil { + go c.handleBidirectionalStreams() + } + go c.hconn.HandleUnidirectionalStreams(c.UniStreamHijacker) +} + +func (c *SingleDestinationRoundTripper) setupConn(conn *connection) error { + // open the control stream + str, err := conn.OpenUniStream() + if err != nil { + return err + } + b := make([]byte, 0, 64) + b = quicvarint.Append(b, streamTypeControlStream) + // send the SETTINGS frame + b = (&settingsFrame{Datagram: c.EnableDatagrams, Other: c.AdditionalSettings}).Append(b) + _, err = str.Write(b) + return err +} + +func (c *SingleDestinationRoundTripper) handleBidirectionalStreams() { + for { + str, err := 
c.hconn.AcceptStream(context.Background()) + if err != nil { + if c.Debugf != nil { + c.Debugf("accepting bidirectional stream failed: %s", err.Error()) + } + return + } + fp := &frameParser{ + r: str, + conn: c.hconn, + unknownFrameHandler: func(ft FrameType, e error) (processed bool, err error) { + id := c.hconn.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID) + return c.StreamHijacker(ft, id, str, e) + }, + } + go func() { + if _, err := fp.ParseNext(); err == errHijacked { + return + } + if err != nil { + if c.Debugf != nil { + c.Debugf("error handling stream: %s", err.Error()) + } + } + c.hconn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "received HTTP/3 frame on bidirectional stream") + }() + } +} + +func (c *SingleDestinationRoundTripper) maxHeaderBytes() uint64 { + if c.MaxResponseHeaderBytes <= 0 { + return defaultMaxResponseHeaderBytes + } + return uint64(c.MaxResponseHeaderBytes) +} + +// RoundTrip executes a request and returns a response +func (c *SingleDestinationRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + c.initOnce.Do(func() { c.init() }) + + rsp, err := c.roundTrip(req) + if err != nil && req.Context().Err() != nil { + // if the context was canceled, return the context cancellation error + err = req.Context().Err() + } + return rsp, err +} + +func (c *SingleDestinationRoundTripper) roundTrip(req *http.Request) (*http.Response, error) { + // Immediately send out this request, if this is a 0-RTT request. + switch req.Method { + case MethodGet0RTT: + // don't modify the original request + reqCopy := *req + req = &reqCopy + req.Method = http.MethodGet + case MethodHead0RTT: + // don't modify the original request + reqCopy := *req + req = &reqCopy + req.Method = http.MethodHead + default: + // wait for the handshake to complete + earlyConn, ok := c.Connection.(quic.EarlyConnection) + if ok { + select { + case <-earlyConn.HandshakeComplete(): + case <-req.Context().Done(): + return nil, req.Context().Err() + } + } + } + + // It is only possible to send an Extended CONNECT request once the SETTINGS were received. + // See section 3 of RFC 8441. + if isExtendedConnectRequest(req) { + connCtx := c.Connection.Context() + // wait for the server's SETTINGS frame to arrive + select { + case <-c.hconn.ReceivedSettings(): + case <-connCtx.Done(): + return nil, context.Cause(connCtx) + } + if !c.hconn.Settings().EnableExtendedConnect { + return nil, errors.New("http3: server didn't enable Extended CONNECT") + } + } + + reqDone := make(chan struct{}) + str, err := c.hconn.openRequestStream(req.Context(), c.requestWriter, reqDone, c.DisableCompression, c.maxHeaderBytes()) + if err != nil { + return nil, err + } + + // Request Cancellation: + // This go routine keeps running even after RoundTripOpt() returns. + // It is shut down when the application is done processing the body. 
+ done := make(chan struct{}) + go func() { + defer close(done) + select { + case <-req.Context().Done(): + str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestCanceled)) + str.CancelRead(quic.StreamErrorCode(ErrCodeRequestCanceled)) + case <-reqDone: + } + }() + + rsp, err := c.doRequest(req, str) + if err != nil { // if any error occurred + close(reqDone) + <-done + return nil, maybeReplaceError(err) + } + return rsp, maybeReplaceError(err) +} + +func (c *SingleDestinationRoundTripper) OpenRequestStream(ctx context.Context) (RequestStream, error) { + c.initOnce.Do(func() { c.init() }) + + return c.hconn.openRequestStream(ctx, c.requestWriter, nil, c.DisableCompression, c.maxHeaderBytes()) +} + +// cancelingReader reads from the io.Reader. +// It cancels writing on the stream if any error other than io.EOF occurs. +type cancelingReader struct { + r io.Reader + str Stream +} + +func (r *cancelingReader) Read(b []byte) (int, error) { + n, err := r.r.Read(b) + if err != nil && err != io.EOF { + r.str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestCanceled)) + } + return n, err +} + +func (c *SingleDestinationRoundTripper) sendRequestBody(str Stream, body io.ReadCloser, dumps []*dump.Dumper) error { + defer body.Close() + buf := make([]byte, bodyCopyBufferSize) + sr := &cancelingReader{str: str, r: body} + var w io.Writer = str + if len(dumps) > 0 { + for _, d := range dumps { + w = io.MultiWriter(w, d.RequestBodyOutput()) + } + } + writeTail := func() { + for _, d := range dumps { + d.Output().Write([]byte("\r\n\r\n")) + } + } + written, err := io.CopyBuffer(w, sr, buf) + if len(dumps) > 0 && err == nil && written > 0 { + writeTail() + } + + return err +} + +func (c *SingleDestinationRoundTripper) doRequest(req *http.Request, str *requestStream) (*http.Response, error) { + if err := str.SendRequestHeader(req); err != nil { + return nil, err + } + if req.Body == nil { + str.Close() + } else { + // send the request body asynchronously + go func() { + dumps := dump.GetDumpers(req.Context(), c.Dump) + if err := c.sendRequestBody(str, req.Body, dumps); err != nil { + if c.Debugf != nil { + c.Debugf("error writing request: %s", err.Error()) + } + } + str.Close() + }() + } + + // copy from net/http: support 1xx responses + trace := httptrace.ContextClientTrace(req.Context()) + num1xx := 0 // number of informational 1xx headers received + const max1xxResponses = 5 // arbitrary bound on number of informational responses + + var res *http.Response + for { + var err error + res, err = str.ReadResponse() + if err != nil { + return nil, err + } + resCode := res.StatusCode + is1xx := 100 <= resCode && resCode <= 199 + // treat 101 as a terminal status, see https://github.com/golang/go/issues/26161 + is1xxNonTerminal := is1xx && resCode != http.StatusSwitchingProtocols + if is1xxNonTerminal { + num1xx++ + if num1xx > max1xxResponses { + return nil, errors.New("http: too many 1xx informational responses") + } + if trace != nil && trace.Got1xxResponse != nil { + if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(res.Header)); err != nil { + return nil, err + } + } + continue + } + break + } + connState := c.hconn.ConnectionState().TLS + res.TLS = &connState + res.Request = req + return res, nil +} diff --git a/internal/http3/conn.go b/internal/http3/conn.go new file mode 100644 index 00000000..fa5302f9 --- /dev/null +++ b/internal/http3/conn.go @@ -0,0 +1,324 @@ +package http3 + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + 
"github.com/imroc/req/v3/internal/transport" + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/quicvarint" + + "github.com/quic-go/qpack" +) + +// Connection is an HTTP/3 connection. +// It has all methods from the quic.Connection expect for AcceptStream, AcceptUniStream, +// SendDatagram and ReceiveDatagram. +type Connection interface { + OpenStream() (quic.Stream, error) + OpenStreamSync(context.Context) (quic.Stream, error) + OpenUniStream() (quic.SendStream, error) + OpenUniStreamSync(context.Context) (quic.SendStream, error) + LocalAddr() net.Addr + RemoteAddr() net.Addr + CloseWithError(quic.ApplicationErrorCode, string) error + Context() context.Context + ConnectionState() quic.ConnectionState + + // ReceivedSettings returns a channel that is closed once the client's SETTINGS frame was received. + ReceivedSettings() <-chan struct{} + // Settings returns the settings received on this connection. + Settings() *Settings +} + +type connection struct { + quic.Connection + *transport.Options + ctx context.Context + + perspective Perspective + + enableDatagrams bool + + decoder *qpack.Decoder + + streamMx sync.Mutex + streams map[quic.StreamID]*datagrammer + + settings *Settings + receivedSettings chan struct{} + + idleTimeout time.Duration + idleTimer *time.Timer +} + +func newConnection( + ctx context.Context, + quicConn quic.Connection, + enableDatagrams bool, + perspective Perspective, + idleTimeout time.Duration, + options *transport.Options, +) *connection { + c := &connection{ + ctx: ctx, + Connection: quicConn, + Options: options, + perspective: perspective, + idleTimeout: idleTimeout, + enableDatagrams: enableDatagrams, + decoder: qpack.NewDecoder(func(hf qpack.HeaderField) {}), + receivedSettings: make(chan struct{}), + streams: make(map[quic.StreamID]*datagrammer), + } + if idleTimeout > 0 { + c.idleTimer = time.AfterFunc(idleTimeout, c.onIdleTimer) + } + return c +} + +func (c *connection) onIdleTimer() { + c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "idle timeout") +} + +func (c *connection) clearStream(id quic.StreamID) { + c.streamMx.Lock() + defer c.streamMx.Unlock() + + delete(c.streams, id) + if c.idleTimeout > 0 && len(c.streams) == 0 { + c.idleTimer.Reset(c.idleTimeout) + } +} + +func (c *connection) openRequestStream( + ctx context.Context, + requestWriter *requestWriter, + reqDone chan<- struct{}, + disableCompression bool, + maxHeaderBytes uint64, +) (*requestStream, error) { + str, err := c.Connection.OpenStreamSync(ctx) + if err != nil { + return nil, err + } + datagrams := newDatagrammer(func(b []byte) error { return c.sendDatagram(str.StreamID(), b) }) + c.streamMx.Lock() + c.streams[str.StreamID()] = datagrams + c.streamMx.Unlock() + qstr := newStateTrackingStream(str, c, datagrams) + rsp := &http.Response{} + hstr := newStream(qstr, c, datagrams, func(r io.Reader, l uint64) error { + hdr, err := c.decodeTrailers(r, l, maxHeaderBytes) + if err != nil { + return err + } + rsp.Trailer = hdr + return nil + }) + return newRequestStream(ctx, c.Options, hstr, requestWriter, reqDone, c.decoder, disableCompression, maxHeaderBytes, rsp), nil +} + +func (c *connection) decodeTrailers(r io.Reader, l, maxHeaderBytes uint64) (http.Header, error) { + if l > maxHeaderBytes { + return nil, fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", l, maxHeaderBytes) + } + + b := make([]byte, l) + if _, err := io.ReadFull(r, b); err != nil { + return nil, err + } + fields, err := c.decoder.DecodeFull(b) + if err != nil { + return nil, err + } + return 
parseTrailers(fields) +} + +func (c *connection) acceptStream(ctx context.Context) (quic.Stream, *datagrammer, error) { + str, err := c.AcceptStream(ctx) + if err != nil { + return nil, nil, err + } + datagrams := newDatagrammer(func(b []byte) error { return c.sendDatagram(str.StreamID(), b) }) + if c.perspective == PerspectiveServer { + strID := str.StreamID() + c.streamMx.Lock() + c.streams[strID] = datagrams + if c.idleTimeout > 0 { + if len(c.streams) == 1 { + c.idleTimer.Stop() + } + } + c.streamMx.Unlock() + str = newStateTrackingStream(str, c, datagrams) + } + return str, datagrams, nil +} + +func (c *connection) CloseWithError(code quic.ApplicationErrorCode, msg string) error { + if c.idleTimer != nil { + c.idleTimer.Stop() + } + return c.Connection.CloseWithError(code, msg) +} + +func (c *connection) HandleUnidirectionalStreams(hijack func(ServerStreamType, quic.ConnectionTracingID, quic.ReceiveStream, error) (hijacked bool)) { + var ( + rcvdControlStr atomic.Bool + rcvdQPACKEncoderStr atomic.Bool + rcvdQPACKDecoderStr atomic.Bool + ) + + for { + str, err := c.Connection.AcceptUniStream(context.Background()) + if err != nil { + if c.Debugf != nil { + c.Debugf("accepting unidirectional stream failed: %s", err.Error()) + } + return + } + + go func(str quic.ReceiveStream) { + streamType, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + id := c.Connection.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID) + if hijack != nil && hijack(ServerStreamType(streamType), id, str, err) { + return + } + if c.Debugf != nil { + c.Debugf("reading stream type on stream failed (id %v): %s", str.StreamID(), err.Error()) + } + return + } + // We're only interested in the control stream here. + switch streamType { + case streamTypeControlStream: + case streamTypeQPACKEncoderStream: + if isFirst := rcvdQPACKEncoderStr.CompareAndSwap(false, true); !isFirst { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK encoder stream") + } + // Our QPACK implementation doesn't use the dynamic table yet. + return + case streamTypeQPACKDecoderStream: + if isFirst := rcvdQPACKDecoderStr.CompareAndSwap(false, true); !isFirst { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK decoder stream") + } + // Our QPACK implementation doesn't use the dynamic table yet. + return + case streamTypePushStream: + switch c.perspective { + case PerspectiveClient: + // we never increased the Push ID, so we don't expect any push streams + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "") + case PerspectiveServer: + // only the server can push + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "") + } + return + default: + if hijack != nil { + if hijack( + ServerStreamType(streamType), + c.Connection.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID), + str, + nil, + ) { + return + } + } + str.CancelRead(quic.StreamErrorCode(ErrCodeStreamCreationError)) + return + } + // Only a single control stream is allowed. 
+ if isFirstControlStr := rcvdControlStr.CompareAndSwap(false, true); !isFirstControlStr { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate control stream") + return + } + fp := &frameParser{conn: c.Connection, r: str} + f, err := fp.ParseNext() + if err != nil { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameError), "") + return + } + sf, ok := f.(*settingsFrame) + if !ok { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeMissingSettings), "") + return + } + c.settings = &Settings{ + EnableDatagrams: sf.Datagram, + EnableExtendedConnect: sf.ExtendedConnect, + Other: sf.Other, + } + close(c.receivedSettings) + if !sf.Datagram { + return + } + // If datagram support was enabled on our side as well as on the server side, + // we can expect it to have been negotiated both on the transport and on the HTTP/3 layer. + // Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT). + if c.enableDatagrams && !c.Connection.ConnectionState().SupportsDatagrams { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeSettingsError), "missing QUIC Datagram support") + return + } + go func() { + if err := c.receiveDatagrams(); err != nil { + if c.Debugf != nil { + c.Debugf("receiving datagrams failed: %s", err.Error()) + } + } + }() + }(str) + } +} + +func (c *connection) sendDatagram(streamID quic.StreamID, b []byte) error { + // TODO: this creates a lot of garbage and an additional copy + data := make([]byte, 0, len(b)+8) + data = quicvarint.Append(data, uint64(streamID/4)) + data = append(data, b...) + return c.Connection.SendDatagram(data) +} + +func (c *connection) receiveDatagrams() error { + for { + b, err := c.Connection.ReceiveDatagram(context.Background()) + if err != nil { + return err + } + quarterStreamID, n, err := quicvarint.Parse(b) + if err != nil { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeDatagramError), "") + return fmt.Errorf("could not read quarter stream id: %w", err) + } + if quarterStreamID > maxQuarterStreamID { + c.Connection.CloseWithError(quic.ApplicationErrorCode(ErrCodeDatagramError), "") + return fmt.Errorf("invalid quarter stream id: %w", err) + } + streamID := quic.StreamID(4 * quarterStreamID) + c.streamMx.Lock() + dg, ok := c.streams[streamID] + if !ok { + c.streamMx.Unlock() + return nil + } + c.streamMx.Unlock() + dg.enqueue(b[n:]) + } +} + +// ReceivedSettings returns a channel that is closed once the peer's SETTINGS frame was received. +func (c *connection) ReceivedSettings() <-chan struct{} { return c.receivedSettings } + +// Settings returns the settings received on this connection. +// It is only valid to call this function after the channel returned by ReceivedSettings was closed. 
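+// A caller therefore typically waits first, e.g. (sketch, assuming conn is this
+// Connection and ctx is the caller's context):
+//
+//	select {
+//	case <-conn.ReceivedSettings():
+//		if conn.Settings().EnableDatagrams {
+//			// HTTP/3 datagrams were negotiated
+//		}
+//	case <-ctx.Done():
+//		// handle cancellation
+//	}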
+func (c *connection) Settings() *Settings { return c.settings } + +func (c *connection) Context() context.Context { return c.ctx } diff --git a/internal/http3/datagram.go b/internal/http3/datagram.go new file mode 100644 index 00000000..6d570e6b --- /dev/null +++ b/internal/http3/datagram.go @@ -0,0 +1,98 @@ +package http3 + +import ( + "context" + "sync" +) + +const maxQuarterStreamID = 1<<60 - 1 + +const streamDatagramQueueLen = 32 + +type datagrammer struct { + sendDatagram func([]byte) error + + hasData chan struct{} + queue [][]byte // TODO: use a ring buffer + + mx sync.Mutex + sendErr error + receiveErr error +} + +func newDatagrammer(sendDatagram func([]byte) error) *datagrammer { + return &datagrammer{ + sendDatagram: sendDatagram, + hasData: make(chan struct{}, 1), + } +} + +func (d *datagrammer) SetReceiveError(err error) { + d.mx.Lock() + defer d.mx.Unlock() + + d.receiveErr = err + d.signalHasData() +} + +func (d *datagrammer) SetSendError(err error) { + d.mx.Lock() + defer d.mx.Unlock() + + d.sendErr = err +} + +func (d *datagrammer) Send(b []byte) error { + d.mx.Lock() + sendErr := d.sendErr + d.mx.Unlock() + if sendErr != nil { + return sendErr + } + + return d.sendDatagram(b) +} + +func (d *datagrammer) signalHasData() { + select { + case d.hasData <- struct{}{}: + default: + } +} + +func (d *datagrammer) enqueue(data []byte) { + d.mx.Lock() + defer d.mx.Unlock() + + if d.receiveErr != nil { + return + } + if len(d.queue) >= streamDatagramQueueLen { + return + } + d.queue = append(d.queue, data) + d.signalHasData() +} + +func (d *datagrammer) Receive(ctx context.Context) ([]byte, error) { +start: + d.mx.Lock() + if len(d.queue) >= 1 { + data := d.queue[0] + d.queue = d.queue[1:] + d.mx.Unlock() + return data, nil + } + if receiveErr := d.receiveErr; receiveErr != nil { + d.mx.Unlock() + return nil, receiveErr + } + d.mx.Unlock() + + select { + case <-ctx.Done(): + return nil, context.Cause(ctx) + case <-d.hasData: + } + goto start +} diff --git a/internal/http3/error.go b/internal/http3/error.go new file mode 100644 index 00000000..b96ebeec --- /dev/null +++ b/internal/http3/error.go @@ -0,0 +1,58 @@ +package http3 + +import ( + "errors" + "fmt" + + "github.com/quic-go/quic-go" +) + +// Error is returned from the round tripper (for HTTP clients) +// and inside the HTTP handler (for HTTP servers) if an HTTP/3 error occurs. +// See section 8 of RFC 9114. +type Error struct { + Remote bool + ErrorCode ErrCode + ErrorMessage string +} + +var _ error = &Error{} + +func (e *Error) Error() string { + s := e.ErrorCode.string() + if s == "" { + s = fmt.Sprintf("H3 error (%#x)", uint64(e.ErrorCode)) + } + // Usually errors are remote. Only make it explicit for local errors. 
+ if !e.Remote { + s += " (local)" + } + if e.ErrorMessage != "" { + s += ": " + e.ErrorMessage + } + return s +} + +func maybeReplaceError(err error) error { + if err == nil { + return nil + } + + var ( + e Error + strErr *quic.StreamError + appErr *quic.ApplicationError + ) + switch { + default: + return err + case errors.As(err, &strErr): + e.Remote = strErr.Remote + e.ErrorCode = ErrCode(strErr.ErrorCode) + case errors.As(err, &appErr): + e.Remote = appErr.Remote + e.ErrorCode = ErrCode(appErr.ErrorCode) + e.ErrorMessage = appErr.ErrorMessage + } + return &e +} diff --git a/internal/http3/error_codes.go b/internal/http3/error_codes.go new file mode 100644 index 00000000..ae646586 --- /dev/null +++ b/internal/http3/error_codes.go @@ -0,0 +1,81 @@ +package http3 + +import ( + "fmt" + + "github.com/quic-go/quic-go" +) + +type ErrCode quic.ApplicationErrorCode + +const ( + ErrCodeNoError ErrCode = 0x100 + ErrCodeGeneralProtocolError ErrCode = 0x101 + ErrCodeInternalError ErrCode = 0x102 + ErrCodeStreamCreationError ErrCode = 0x103 + ErrCodeClosedCriticalStream ErrCode = 0x104 + ErrCodeFrameUnexpected ErrCode = 0x105 + ErrCodeFrameError ErrCode = 0x106 + ErrCodeExcessiveLoad ErrCode = 0x107 + ErrCodeIDError ErrCode = 0x108 + ErrCodeSettingsError ErrCode = 0x109 + ErrCodeMissingSettings ErrCode = 0x10a + ErrCodeRequestRejected ErrCode = 0x10b + ErrCodeRequestCanceled ErrCode = 0x10c + ErrCodeRequestIncomplete ErrCode = 0x10d + ErrCodeMessageError ErrCode = 0x10e + ErrCodeConnectError ErrCode = 0x10f + ErrCodeVersionFallback ErrCode = 0x110 + ErrCodeDatagramError ErrCode = 0x33 +) + +func (e ErrCode) String() string { + s := e.string() + if s != "" { + return s + } + return fmt.Sprintf("unknown error code: %#x", uint16(e)) +} + +func (e ErrCode) string() string { + switch e { + case ErrCodeNoError: + return "H3_NO_ERROR" + case ErrCodeGeneralProtocolError: + return "H3_GENERAL_PROTOCOL_ERROR" + case ErrCodeInternalError: + return "H3_INTERNAL_ERROR" + case ErrCodeStreamCreationError: + return "H3_STREAM_CREATION_ERROR" + case ErrCodeClosedCriticalStream: + return "H3_CLOSED_CRITICAL_STREAM" + case ErrCodeFrameUnexpected: + return "H3_FRAME_UNEXPECTED" + case ErrCodeFrameError: + return "H3_FRAME_ERROR" + case ErrCodeExcessiveLoad: + return "H3_EXCESSIVE_LOAD" + case ErrCodeIDError: + return "H3_ID_ERROR" + case ErrCodeSettingsError: + return "H3_SETTINGS_ERROR" + case ErrCodeMissingSettings: + return "H3_MISSING_SETTINGS" + case ErrCodeRequestRejected: + return "H3_REQUEST_REJECTED" + case ErrCodeRequestCanceled: + return "H3_REQUEST_CANCELLED" + case ErrCodeRequestIncomplete: + return "H3_INCOMPLETE_REQUEST" + case ErrCodeMessageError: + return "H3_MESSAGE_ERROR" + case ErrCodeConnectError: + return "H3_CONNECT_ERROR" + case ErrCodeVersionFallback: + return "H3_VERSION_FALLBACK" + case ErrCodeDatagramError: + return "H3_DATAGRAM_ERROR" + default: + return "" + } +} diff --git a/internal/http3/frames.go b/internal/http3/frames.go new file mode 100644 index 00000000..b2d59a52 --- /dev/null +++ b/internal/http3/frames.go @@ -0,0 +1,196 @@ +package http3 + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/imroc/req/v3/internal/quic-go/quicvarint" + "github.com/quic-go/quic-go" +) + +// FrameType is the frame type of a HTTP/3 frame +type FrameType uint64 + +type unknownFrameHandlerFunc func(FrameType, error) (processed bool, err error) + +type frame interface{} + +var errHijacked = errors.New("hijacked") + +type frameParser struct { + r io.Reader + conn quic.Connection + 
unknownFrameHandler unknownFrameHandlerFunc +} + +func (p *frameParser) ParseNext() (frame, error) { + qr := quicvarint.NewReader(p.r) + for { + t, err := quicvarint.Read(qr) + if err != nil { + if p.unknownFrameHandler != nil { + hijacked, err := p.unknownFrameHandler(0, err) + if err != nil { + return nil, err + } + if hijacked { + return nil, errHijacked + } + } + return nil, err + } + // Call the unknownFrameHandler for frames not defined in the HTTP/3 spec + if t > 0xd && p.unknownFrameHandler != nil { + hijacked, err := p.unknownFrameHandler(FrameType(t), nil) + if err != nil { + return nil, err + } + if hijacked { + return nil, errHijacked + } + // If the unknownFrameHandler didn't process the frame, it is our responsibility to skip it. + } + l, err := quicvarint.Read(qr) + if err != nil { + return nil, err + } + + switch t { + case 0x0: + return &dataFrame{Length: l}, nil + case 0x1: + return &headersFrame{Length: l}, nil + case 0x4: + return parseSettingsFrame(p.r, l) + case 0x3: // CANCEL_PUSH + case 0x5: // PUSH_PROMISE + case 0x7: // GOAWAY + case 0xd: // MAX_PUSH_ID + case 0x2, 0x6, 0x8, 0x9: + p.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "") + return nil, fmt.Errorf("http3: reserved frame type: %d", t) + } + // skip over unknown frames + if _, err := io.CopyN(io.Discard, qr, int64(l)); err != nil { + return nil, err + } + } +} + +type dataFrame struct { + Length uint64 +} + +func (f *dataFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x0) + return quicvarint.Append(b, f.Length) +} + +type headersFrame struct { + Length uint64 +} + +func (f *headersFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x1) + return quicvarint.Append(b, f.Length) +} + +const ( + // Extended CONNECT, RFC 9220 + settingExtendedConnect = 0x8 + // HTTP Datagrams, RFC 9297 + settingDatagram = 0x33 +) + +type settingsFrame struct { + Datagram bool // HTTP Datagrams, RFC 9297 + ExtendedConnect bool // Extended CONNECT, RFC 9220 + + Other map[uint64]uint64 // all settings that we don't explicitly recognize +} + +func parseSettingsFrame(r io.Reader, l uint64) (*settingsFrame, error) { + if l > 8*(1<<10) { + return nil, fmt.Errorf("unexpected size for SETTINGS frame: %d", l) + } + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + if err == io.ErrUnexpectedEOF { + return nil, io.EOF + } + return nil, err + } + frame := &settingsFrame{} + b := bytes.NewReader(buf) + var readDatagram, readExtendedConnect bool + for b.Len() > 0 { + id, err := quicvarint.Read(b) + if err != nil { // should not happen. We allocated the whole frame already. + return nil, err + } + val, err := quicvarint.Read(b) + if err != nil { // should not happen. We allocated the whole frame already. 
+ return nil, err + } + + switch id { + case settingExtendedConnect: + if readExtendedConnect { + return nil, fmt.Errorf("duplicate setting: %d", id) + } + readExtendedConnect = true + if val != 0 && val != 1 { + return nil, fmt.Errorf("invalid value for SETTINGS_ENABLE_CONNECT_PROTOCOL: %d", val) + } + frame.ExtendedConnect = val == 1 + case settingDatagram: + if readDatagram { + return nil, fmt.Errorf("duplicate setting: %d", id) + } + readDatagram = true + if val != 0 && val != 1 { + return nil, fmt.Errorf("invalid value for SETTINGS_H3_DATAGRAM: %d", val) + } + frame.Datagram = val == 1 + default: + if _, ok := frame.Other[id]; ok { + return nil, fmt.Errorf("duplicate setting: %d", id) + } + if frame.Other == nil { + frame.Other = make(map[uint64]uint64) + } + frame.Other[id] = val + } + } + return frame, nil +} + +func (f *settingsFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x4) + var l int + for id, val := range f.Other { + l += quicvarint.Len(id) + quicvarint.Len(val) + } + if f.Datagram { + l += quicvarint.Len(settingDatagram) + quicvarint.Len(1) + } + if f.ExtendedConnect { + l += quicvarint.Len(settingExtendedConnect) + quicvarint.Len(1) + } + b = quicvarint.Append(b, uint64(l)) + if f.Datagram { + b = quicvarint.Append(b, settingDatagram) + b = quicvarint.Append(b, 1) + } + if f.ExtendedConnect { + b = quicvarint.Append(b, settingExtendedConnect) + b = quicvarint.Append(b, 1) + } + for id, val := range f.Other { + b = quicvarint.Append(b, id) + b = quicvarint.Append(b, val) + } + return b +} diff --git a/internal/http3/headers.go b/internal/http3/headers.go new file mode 100644 index 00000000..cbd79ecd --- /dev/null +++ b/internal/http3/headers.go @@ -0,0 +1,259 @@ +package http3 + +import ( + "errors" + "fmt" + "net/http" + "net/textproto" + "net/url" + "strconv" + "strings" + + "github.com/quic-go/qpack" + "golang.org/x/net/http/httpguts" +) + +type header struct { + // Pseudo header fields defined in RFC 9114 + Path string + Method string + Authority string + Scheme string + Status string + // for Extended connect + Protocol string + // parsed and deduplicated. 
-1 if no Content-Length header is sent + ContentLength int64 + // all non-pseudo headers + Headers http.Header +} + +// connection-specific header fields must not be sent on HTTP/3 +var invalidHeaderFields = [...]string{ + "connection", + "keep-alive", + "proxy-connection", + "transfer-encoding", + "upgrade", +} + +func parseHeaders(headers []qpack.HeaderField, isRequest bool) (header, error) { + hdr := header{Headers: make(http.Header, len(headers))} + var readFirstRegularHeader, readContentLength bool + var contentLengthStr string + for _, h := range headers { + // field names need to be lowercase, see section 4.2 of RFC 9114 + if strings.ToLower(h.Name) != h.Name { + return header{}, fmt.Errorf("header field is not lower-case: %s", h.Name) + } + if !httpguts.ValidHeaderFieldValue(h.Value) { + return header{}, fmt.Errorf("invalid header field value for %s: %q", h.Name, h.Value) + } + if h.IsPseudo() { + if readFirstRegularHeader { + // all pseudo headers must appear before regular header fields, see section 4.3 of RFC 9114 + return header{}, fmt.Errorf("received pseudo header %s after a regular header field", h.Name) + } + var isResponsePseudoHeader bool // pseudo headers are either valid for requests or for responses + switch h.Name { + case ":path": + hdr.Path = h.Value + case ":method": + hdr.Method = h.Value + case ":authority": + hdr.Authority = h.Value + case ":protocol": + hdr.Protocol = h.Value + case ":scheme": + hdr.Scheme = h.Value + case ":status": + hdr.Status = h.Value + isResponsePseudoHeader = true + default: + return header{}, fmt.Errorf("unknown pseudo header: %s", h.Name) + } + if isRequest && isResponsePseudoHeader { + return header{}, fmt.Errorf("invalid request pseudo header: %s", h.Name) + } + if !isRequest && !isResponsePseudoHeader { + return header{}, fmt.Errorf("invalid response pseudo header: %s", h.Name) + } + } else { + if !httpguts.ValidHeaderFieldName(h.Name) { + return header{}, fmt.Errorf("invalid header field name: %q", h.Name) + } + for _, invalidField := range invalidHeaderFields { + if h.Name == invalidField { + return header{}, fmt.Errorf("invalid header field name: %q", h.Name) + } + } + if h.Name == "te" && h.Value != "trailers" { + return header{}, fmt.Errorf("invalid TE header field value: %q", h.Value) + } + readFirstRegularHeader = true + switch h.Name { + case "content-length": + // Ignore duplicate Content-Length headers. + // Fail if the duplicates differ. 
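+				// For example, two "content-length: 100" fields are accepted,
+				// while "content-length: 100" followed by "content-length: 200"
+				// is rejected as malformed.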
+ if !readContentLength { + readContentLength = true + contentLengthStr = h.Value + } else if contentLengthStr != h.Value { + return header{}, fmt.Errorf("contradicting content lengths (%s and %s)", contentLengthStr, h.Value) + } + default: + hdr.Headers.Add(h.Name, h.Value) + } + } + } + hdr.ContentLength = -1 + if len(contentLengthStr) > 0 { + // use ParseUint instead of ParseInt, so that parsing fails on negative values + cl, err := strconv.ParseUint(contentLengthStr, 10, 63) + if err != nil { + return header{}, fmt.Errorf("invalid content length: %w", err) + } + hdr.Headers.Set("Content-Length", contentLengthStr) + hdr.ContentLength = int64(cl) + } + return hdr, nil +} + +func parseTrailers(headers []qpack.HeaderField) (http.Header, error) { + h := make(http.Header, len(headers)) + for _, field := range headers { + if field.IsPseudo() { + return nil, fmt.Errorf("http3: received pseudo header in trailer: %s", field.Name) + } + h.Add(field.Name, field.Value) + } + return h, nil +} + +func requestFromHeaders(headerFields []qpack.HeaderField) (*http.Request, error) { + hdr, err := parseHeaders(headerFields, true) + if err != nil { + return nil, err + } + // concatenate cookie headers, see https://tools.ietf.org/html/rfc6265#section-5.4 + if len(hdr.Headers["Cookie"]) > 0 { + hdr.Headers.Set("Cookie", strings.Join(hdr.Headers["Cookie"], "; ")) + } + + isConnect := hdr.Method == http.MethodConnect + // Extended CONNECT, see https://datatracker.ietf.org/doc/html/rfc8441#section-4 + isExtendedConnected := isConnect && hdr.Protocol != "" + if isExtendedConnected { + if hdr.Scheme == "" || hdr.Path == "" || hdr.Authority == "" { + return nil, errors.New("extended CONNECT: :scheme, :path and :authority must not be empty") + } + } else if isConnect { + if hdr.Path != "" || hdr.Authority == "" { // normal CONNECT + return nil, errors.New(":path must be empty and :authority must not be empty") + } + } else if len(hdr.Path) == 0 || len(hdr.Authority) == 0 || len(hdr.Method) == 0 { + return nil, errors.New(":path, :authority and :method must not be empty") + } + + if !isExtendedConnected && len(hdr.Protocol) > 0 { + return nil, errors.New(":protocol must be empty") + } + + var u *url.URL + var requestURI string + + protocol := "HTTP/3.0" + + if isConnect { + u = &url.URL{} + if isExtendedConnected { + u, err = url.ParseRequestURI(hdr.Path) + if err != nil { + return nil, err + } + protocol = hdr.Protocol + } else { + u.Path = hdr.Path + } + u.Scheme = hdr.Scheme + u.Host = hdr.Authority + requestURI = hdr.Authority + } else { + u, err = url.ParseRequestURI(hdr.Path) + if err != nil { + return nil, fmt.Errorf("invalid content length: %w", err) + } + requestURI = hdr.Path + } + + return &http.Request{ + Method: hdr.Method, + URL: u, + Proto: protocol, + ProtoMajor: 3, + ProtoMinor: 0, + Header: hdr.Headers, + Body: nil, + ContentLength: hdr.ContentLength, + Host: hdr.Authority, + RequestURI: requestURI, + }, nil +} + +func hostnameFromURL(url *url.URL) string { + if url != nil { + return url.Host + } + return "" +} + +// updateResponseFromHeaders sets up http.Response as an HTTP/3 response, +// using the decoded qpack header filed. +// It is only called for the HTTP header (and not the HTTP trailer). +// It takes an http.Response as an argument to allow the caller to set the trailer later on. 
+func updateResponseFromHeaders(rsp *http.Response, headerFields []qpack.HeaderField) error { + hdr, err := parseHeaders(headerFields, false) + if err != nil { + return err + } + if hdr.Status == "" { + return errors.New("missing status field") + } + rsp.Proto = "HTTP/3.0" + rsp.ProtoMajor = 3 + rsp.Header = hdr.Headers + processTrailers(rsp) + rsp.ContentLength = hdr.ContentLength + + status, err := strconv.Atoi(hdr.Status) + if err != nil { + return fmt.Errorf("invalid status code: %w", err) + } + rsp.StatusCode = status + rsp.Status = hdr.Status + " " + http.StatusText(status) + return nil +} + +// processTrailers initializes the rsp.Trailer map, and adds keys for every announced header value. +// The Trailer header is removed from the http.Response.Header map. +// It handles both duplicate as well as comma-separated values for the Trailer header. +// For example: +// +// Trailer: Trailer1, Trailer2 +// Trailer: Trailer3 +// +// Will result in a http.Response.Trailer map containing the keys "Trailer1", "Trailer2", "Trailer3". +func processTrailers(rsp *http.Response) { + rawTrailers, ok := rsp.Header["Trailer"] + if !ok { + return + } + + rsp.Trailer = make(http.Header) + for _, rawVal := range rawTrailers { + for _, val := range strings.Split(rawVal, ",") { + rsp.Trailer[http.CanonicalHeaderKey(textproto.TrimString(val))] = nil + } + } + delete(rsp.Header, "Trailer") +} diff --git a/internal/http3/http_stream.go b/internal/http3/http_stream.go new file mode 100644 index 00000000..7c969090 --- /dev/null +++ b/internal/http3/http_stream.go @@ -0,0 +1,303 @@ +package http3 + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + + "github.com/imroc/req/v3/internal/compress" + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/transport" + + "github.com/quic-go/quic-go" + + "github.com/quic-go/qpack" +) + +// A Stream is an HTTP/3 request stream. +// When writing to and reading from the stream, data is framed in HTTP/3 DATA frames. +type Stream interface { + quic.Stream + + SendDatagram([]byte) error + ReceiveDatagram(context.Context) ([]byte, error) +} + +// A RequestStream is an HTTP/3 request stream. +// When writing to and reading from the stream, data is framed in HTTP/3 DATA frames. +type RequestStream interface { + Stream + + // SendRequestHeader sends the HTTP request. + // It is invalid to call it more than once. + // It is invalid to call it after Write has been called. + SendRequestHeader(req *http.Request) error + + // ReadResponse reads the HTTP response from the stream. + // It is invalid to call it more than once. + // It doesn't set Response.Request and Response.TLS. + // It is invalid to call it after Read has been called. 
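+	// The intended call order on a RequestStream is therefore:
+	// SendRequestHeader, then optionally Write the request body and Close the
+	// send side, then ReadResponse, and only then Read the response body.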
+ ReadResponse() (*http.Response, error) +} + +type stream struct { + quic.Stream + conn *connection + + buf []byte // used as a temporary buffer when writing the HTTP/3 frame headers + + bytesRemainingInFrame uint64 + + datagrams *datagrammer + + parseTrailer func(io.Reader, uint64) error + parsedTrailer bool +} + +var _ Stream = &stream{} + +func newStream(str quic.Stream, conn *connection, datagrams *datagrammer, parseTrailer func(io.Reader, uint64) error) *stream { + return &stream{ + Stream: str, + conn: conn, + buf: make([]byte, 16), + datagrams: datagrams, + parseTrailer: parseTrailer, + } +} + +func (s *stream) Read(b []byte) (int, error) { + fp := &frameParser{ + r: s.Stream, + conn: s.conn, + } + if s.bytesRemainingInFrame == 0 { + parseLoop: + for { + frame, err := fp.ParseNext() + if err != nil { + return 0, err + } + switch f := frame.(type) { + case *dataFrame: + if s.parsedTrailer { + return 0, errors.New("DATA frame received after trailers") + } + s.bytesRemainingInFrame = f.Length + break parseLoop + case *headersFrame: + if s.conn.perspective == PerspectiveServer { + continue + } + if s.parsedTrailer { + return 0, errors.New("additional HEADERS frame received after trailers") + } + s.parsedTrailer = true + return 0, s.parseTrailer(s.Stream, f.Length) + default: + s.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "") + // parseNextFrame skips over unknown frame types + // Therefore, this condition is only entered when we parsed another known frame type. + return 0, fmt.Errorf("peer sent an unexpected frame: %T", f) + } + } + } + + var n int + var err error + if s.bytesRemainingInFrame < uint64(len(b)) { + n, err = s.Stream.Read(b[:s.bytesRemainingInFrame]) + } else { + n, err = s.Stream.Read(b) + } + s.bytesRemainingInFrame -= uint64(n) + return n, err +} + +func (s *stream) hasMoreData() bool { + return s.bytesRemainingInFrame > 0 +} + +func (s *stream) Write(b []byte) (int, error) { + s.buf = s.buf[:0] + s.buf = (&dataFrame{Length: uint64(len(b))}).Append(s.buf) + if _, err := s.Stream.Write(s.buf); err != nil { + return 0, err + } + return s.Stream.Write(b) +} + +func (s *stream) writeUnframed(b []byte) (int, error) { + return s.Stream.Write(b) +} + +func (s *stream) StreamID() quic.StreamID { + return s.Stream.StreamID() +} + +// The stream conforms to the quic.Stream interface, but instead of writing to and reading directly +// from the QUIC stream, it writes to and reads from the HTTP stream. 
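Since stream.Write above wraps every chunk in an HTTP/3 DATA frame, each write puts a small two-varint header in front of the payload. A minimal sketch, assuming dataFrame.Append emits the standard RFC 9114 encoding (varint frame type 0x00 followed by the varint payload length); the helper name and the payload are illustrative only.

```go
package http3

import "github.com/imroc/req/v3/internal/quic-go/quicvarint"

// exampleFrameDataPayload frames one body chunk the way stream.Write does,
// assuming the standard RFC 9114 DATA frame header: the varint frame type
// 0x00, then the varint payload length, then the payload itself.
// For []byte("hello") it returns 00 05 68 65 6c 6c 6f.
func exampleFrameDataPayload(payload []byte) []byte {
	b := quicvarint.Append(nil, 0x00)              // frame type: DATA
	b = quicvarint.Append(b, uint64(len(payload))) // payload length
	return append(b, payload...)
}
```

On the read side, stream.Read does the inverse: it consumes one frame header, remembers the remaining length in bytesRemainingInFrame, and hands back at most that many payload bytes per call.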
+type requestStream struct { + ctx context.Context + *stream + *transport.Options + + responseBody io.ReadCloser // set by ReadResponse + + decoder *qpack.Decoder + requestWriter *requestWriter + maxHeaderBytes uint64 + reqDone chan<- struct{} + disableCompression bool + response *http.Response + + sentRequest bool + requestedGzip bool + isConnect bool +} + +var _ RequestStream = &requestStream{} + +func newRequestStream( + ctx context.Context, + options *transport.Options, + str *stream, + requestWriter *requestWriter, + reqDone chan<- struct{}, + decoder *qpack.Decoder, + disableCompression bool, + maxHeaderBytes uint64, + rsp *http.Response, +) *requestStream { + return &requestStream{ + ctx: ctx, + Options: options, + stream: str, + requestWriter: requestWriter, + reqDone: reqDone, + decoder: decoder, + disableCompression: disableCompression, + maxHeaderBytes: maxHeaderBytes, + response: rsp, + } +} + +func (s *requestStream) Read(b []byte) (int, error) { + if s.responseBody == nil { + return 0, errors.New("http3: invalid use of RequestStream.Read: need to call ReadResponse first") + } + return s.responseBody.Read(b) +} + +func (s *requestStream) SendRequestHeader(req *http.Request) error { + if s.sentRequest { + return errors.New("http3: invalid duplicate use of SendRequestHeader") + } + if !s.DisableCompression && !s.disableCompression && req.Method != http.MethodHead && + req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" { + s.requestedGzip = true + } + dumps := dump.GetDumpers(req.Context(), s.Dump) + var headerDumps []*dump.Dumper + for _, dump := range dumps { + if dump.RequestHeader() { + headerDumps = append(headerDumps, dump) + } + } + + s.isConnect = req.Method == http.MethodConnect + s.sentRequest = true + return s.requestWriter.WriteRequestHeader(s.Stream, req, s.requestedGzip, headerDumps) +} + +func (s *requestStream) ReadResponse() (*http.Response, error) { + fp := &frameParser{ + r: s.Stream, + conn: s.conn, + } + frame, err := fp.ParseNext() + if err != nil { + s.Stream.CancelRead(quic.StreamErrorCode(ErrCodeFrameError)) + s.Stream.CancelWrite(quic.StreamErrorCode(ErrCodeFrameError)) + return nil, fmt.Errorf("http3: parsing frame failed: %w", err) + } + hf, ok := frame.(*headersFrame) + if !ok { + s.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "expected first frame to be a HEADERS frame") + return nil, errors.New("http3: expected first frame to be a HEADERS frame") + } + if hf.Length > s.maxHeaderBytes { + s.Stream.CancelRead(quic.StreamErrorCode(ErrCodeFrameError)) + s.Stream.CancelWrite(quic.StreamErrorCode(ErrCodeFrameError)) + return nil, fmt.Errorf("http3: HEADERS frame too large: %d bytes (max: %d)", hf.Length, s.maxHeaderBytes) + } + headerBlock := make([]byte, hf.Length) + if _, err := io.ReadFull(s.Stream, headerBlock); err != nil { + s.Stream.CancelRead(quic.StreamErrorCode(ErrCodeRequestIncomplete)) + s.Stream.CancelWrite(quic.StreamErrorCode(ErrCodeRequestIncomplete)) + return nil, fmt.Errorf("http3: failed to read response headers: %w", err) + } + hfs, err := s.decoder.DecodeFull(headerBlock) + if err != nil { + // TODO: use the right error code + s.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeGeneralProtocolError), "") + return nil, fmt.Errorf("http3: failed to decode response headers: %w", err) + } + ds := dump.GetResponseHeaderDumpers(s.ctx, s.Dump) + if ds.ShouldDump() { + for _, h := range hfs { + ds.DumpResponseHeader([]byte(fmt.Sprintf("%s: %s\r\n", h.Name, h.Value))) + } + 
ds.DumpResponseHeader([]byte("\r\n")) + } + res := s.response + if err := updateResponseFromHeaders(res, hfs); err != nil { + s.Stream.CancelRead(quic.StreamErrorCode(ErrCodeMessageError)) + s.Stream.CancelWrite(quic.StreamErrorCode(ErrCodeMessageError)) + return nil, fmt.Errorf("http3: invalid response: %w", err) + } + + // Check that the server doesn't send more data in DATA frames than indicated by the Content-Length header (if set). + // See section 4.1.2 of RFC 9114. + respBody := newResponseBody(s.stream, res.ContentLength, s.reqDone) + + // Rules for when to set Content-Length are defined in https://tools.ietf.org/html/rfc7230#section-3.3.2. + isInformational := res.StatusCode >= 100 && res.StatusCode < 200 + isNoContent := res.StatusCode == http.StatusNoContent + isSuccessfulConnect := s.isConnect && res.StatusCode >= 200 && res.StatusCode < 300 + if (isInformational || isNoContent || isSuccessfulConnect) && res.ContentLength == -1 { + res.ContentLength = 0 + } + if s.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + s.responseBody = compress.NewGzipReader(respBody) + res.Uncompressed = true + } else if s.AutoDecompression { + contentEncoding := res.Header.Get("Content-Encoding") + if contentEncoding != "" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Uncompressed = true + res.Body = compress.NewCompressReader(respBody, contentEncoding) + } + } else { + s.responseBody = respBody + } + res.Body = s.responseBody + return res, nil +} + +func (s *stream) SendDatagram(b []byte) error { + // TODO: reject if datagrams are not negotiated (yet) + return s.datagrams.Send(b) +} + +func (s *stream) ReceiveDatagram(ctx context.Context) ([]byte, error) { + // TODO: reject if datagrams are not negotiated (yet) + return s.datagrams.Receive(ctx) +} diff --git a/internal/http3/protocol.go b/internal/http3/protocol.go new file mode 100644 index 00000000..d5ba5bb6 --- /dev/null +++ b/internal/http3/protocol.go @@ -0,0 +1,119 @@ +package http3 + +import ( + "math" + + "github.com/quic-go/quic-go" +) + +// Perspective determines if we're acting as a server or a client +type Perspective int + +// the perspectives +const ( + PerspectiveServer Perspective = 1 + PerspectiveClient Perspective = 2 +) + +// Opposite returns the perspective of the peer +func (p Perspective) Opposite() Perspective { + return 3 - p +} + +func (p Perspective) String() string { + switch p { + case PerspectiveServer: + return "server" + case PerspectiveClient: + return "client" + default: + return "invalid perspective" + } +} + +// The version numbers, making grepping easier +const ( + VersionUnknown quic.Version = math.MaxUint32 + versionDraft29 quic.Version = 0xff00001d // draft-29 used to be a widely deployed version + Version1 quic.Version = 0x1 + Version2 quic.Version = 0x6b3343cf +) + +// SupportedVersions lists the versions that the server supports +// must be in sorted descending order +var SupportedVersions = []quic.Version{Version1, Version2} + +// StreamType encodes if this is a unidirectional or bidirectional stream +type StreamType uint8 + +const ( + // StreamTypeUni is a unidirectional stream + StreamTypeUni StreamType = iota + // StreamTypeBidi is a bidirectional stream + StreamTypeBidi +) + +// A StreamID in QUIC +type StreamID int64 + +// InitiatedBy says if the stream was initiated by the client or by the server +func (s StreamID) InitiatedBy() 
Perspective { + if s%2 == 0 { + return PerspectiveClient + } + return PerspectiveServer +} + +// Type says if this is a unidirectional or bidirectional stream +func (s StreamID) Type() StreamType { + if s%4 >= 2 { + return StreamTypeUni + } + return StreamTypeBidi +} + +// StreamNum returns how many streams in total are below this +// Example: for stream 9 it returns 3 (i.e. streams 1, 5 and 9) +func (s StreamID) StreamNum() StreamNum { + return StreamNum(s/4) + 1 +} + +// InvalidPacketNumber is a stream ID that is invalid. +// The first valid stream ID in QUIC is 0. +const InvalidStreamID StreamID = -1 + +// StreamNum is the stream number +type StreamNum int64 + +const ( + // InvalidStreamNum is an invalid stream number. + InvalidStreamNum = -1 + // MaxStreamCount is the maximum stream count value that can be sent in MAX_STREAMS frames + // and as the stream count in the transport parameters + MaxStreamCount StreamNum = 1 << 60 +) + +// StreamID calculates the stream ID. +func (s StreamNum) StreamID(stype StreamType, pers Perspective) StreamID { + if s == 0 { + return InvalidStreamID + } + var first StreamID + switch stype { + case StreamTypeBidi: + switch pers { + case PerspectiveClient: + first = 0 + case PerspectiveServer: + first = 1 + } + case StreamTypeUni: + switch pers { + case PerspectiveClient: + first = 2 + case PerspectiveServer: + first = 3 + } + } + return first + 4*StreamID(s-1) +} diff --git a/internal/http3/request_writer.go b/internal/http3/request_writer.go new file mode 100644 index 00000000..2af1ce3f --- /dev/null +++ b/internal/http3/request_writer.go @@ -0,0 +1,339 @@ +package http3 + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httptrace" + "strconv" + "strings" + "sync" + + "github.com/imroc/req/v3/internal/dump" + reqheader "github.com/imroc/req/v3/internal/header" + "github.com/quic-go/qpack" + + "github.com/quic-go/quic-go" + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" +) + +const bodyCopyBufferSize = 8 * 1024 + +type requestWriter struct { + mutex sync.Mutex + encoder *qpack.Encoder + headerBuf *bytes.Buffer +} + +func newRequestWriter() *requestWriter { + headerBuf := &bytes.Buffer{} + encoder := qpack.NewEncoder(headerBuf) + return &requestWriter{ + encoder: encoder, + headerBuf: headerBuf, + } +} + +func (w *requestWriter) WriteRequestHeader(str quic.Stream, req *http.Request, gzip bool, dumps []*dump.Dumper) error { + // TODO: figure out how to add support for trailers + buf := &bytes.Buffer{} + if err := w.writeHeaders(buf, req, gzip, dumps); err != nil { + return err + } + _, err := str.Write(buf.Bytes()) + return err +} + +func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool, dumps []*dump.Dumper) error { + w.mutex.Lock() + defer w.mutex.Unlock() + defer w.encoder.Close() + defer w.headerBuf.Reset() + + if err := w.encodeHeaders(req, gzip, "", actualContentLength(req), dumps); err != nil { + return err + } + + b := make([]byte, 0, 128) + b = (&headersFrame{Length: uint64(w.headerBuf.Len())}).Append(b) + if _, err := wr.Write(b); err != nil { + return err + } + _, err := wr.Write(w.headerBuf.Bytes()) + return err +} + +func isExtendedConnectRequest(req *http.Request) bool { + return req.Method == http.MethodConnect && req.Proto != "" && req.Proto != "HTTP/1.1" +} + +// copied from net/transport.go +// Modified to support Extended CONNECT: +// Contrary to what the godoc for the http.Request says, +// we do respect the Proto field if the method is 
CONNECT. +func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64, dumps []*dump.Dumper) error { + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return err + } + if !httpguts.ValidHostHeader(host) { + return errors.New("http3: invalid Host header") + } + + // http.NewRequest sets this field to HTTP/1.1 + isExtendedConnect := isExtendedConnectRequest(req) + + var path string + if req.Method != http.MethodConnect || isExtendedConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + var writeHeader func(name string, value ...string) + var kvs []reqheader.KeyValues + sort := false + if req.Header != nil && len(req.Header[reqheader.PseudoHeaderOderKey]) > 0 { + writeHeader = func(name string, value ...string) { + kvs = append(kvs, reqheader.KeyValues{ + Key: name, + Values: value, + }) + } + sort = true + } else { + writeHeader = func(name string, value ...string) { + for _, v := range value { + f(name, v) + } + } + } + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + writeHeader(":authority", host) + writeHeader(":method", req.Method) + if req.Method != http.MethodConnect || isExtendedConnect { + writeHeader(":path", path) + writeHeader(":scheme", req.URL.Scheme) + } + if isExtendedConnect { + writeHeader(":protocol", req.Proto) + } + + if sort { + reqheader.SortKeyValues(kvs, req.Header[reqheader.PseudoHeaderOderKey]) + for _, kv := range kvs { + for _, v := range kv.Values { + f(kv.Key, v) + } + } + } + + if req.Header != nil && len(req.Header[reqheader.HeaderOderKey]) > 0 { + sort = true + kvs = nil + writeHeader = func(name string, value ...string) { + kvs = append(kvs, reqheader.KeyValues{ + Key: name, + Values: value, + }) + } + } else { + sort = false + writeHeader = func(name string, value ...string) { + for _, v := range value { + f(name, v) + } + } + } + + if trailers != "" { + writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if reqheader.IsExcluded(k) { + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + writeHeader(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + writeHeader("accept-encoding", "gzip") + } + if !didUA { + writeHeader("user-agent", reqheader.DefaultUserAgent) + } + + if sort { + reqheader.SortKeyValues(kvs, req.Header[reqheader.HeaderOderKey]) + for _, kv := range kvs { + for _, v := range kv.Values { + f(kv.Key, v) + } + } + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + // TODO: check maximum header list size + // if hlSize > cc.peerMaxHeaderListSize { + // return errRequestHeaderListSize + // } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + for _, dump := range dumps { + dump.DumpRequestHeader([]byte(fmt.Sprintf("%s: %s\r\n", name, value))) + } + w.encoder.WriteField(qpack.HeaderField{Name: name, Value: value}) + if traceHeaders { + trace.WroteHeaderField(name, []string{value}) + } + }) + + for _, dump := range dumps { + dump.DumpRequestHeader([]byte("\r\n")) + } + + return nil +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). 
+// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} diff --git a/internal/http3/roundtrip.go b/internal/http3/roundtrip.go new file mode 100644 index 00000000..26157b2e --- /dev/null +++ b/internal/http3/roundtrip.go @@ -0,0 +1,397 @@ +package http3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "sync/atomic" + + "github.com/imroc/req/v3/internal/transport" + + "github.com/quic-go/quic-go" + + "golang.org/x/net/http/httpguts" +) + +// Settings are HTTP/3 settings that apply to the underlying connection. +type Settings struct { + // Support for HTTP/3 datagrams (RFC 9297) + EnableDatagrams bool + // Extended CONNECT, RFC 9220 + EnableExtendedConnect bool + // Other settings, defined by the application + Other map[uint64]uint64 +} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether the RoundTripper may create a new QUIC connection. + // If set true and no cached connection is available, RoundTripOpt will return ErrNoCachedConn. + OnlyCachedConn bool +} + +type singleRoundTripper interface { + OpenRequestStream(context.Context) (RequestStream, error) + RoundTrip(*http.Request) (*http.Response, error) +} + +type roundTripperWithCount struct { + cancel context.CancelFunc + dialing chan struct{} // closed as soon as quic.Dial(Early) returned + dialErr error + conn quic.EarlyConnection + rt singleRoundTripper + + useCount atomic.Int64 +} + +func (r *roundTripperWithCount) Close() error { + r.cancel() + <-r.dialing + if r.conn != nil { + return r.conn.CloseWithError(0, "") + } + return nil +} + +// RoundTripper implements the http.RoundTripper interface +type RoundTripper struct { + *transport.Options + mutex sync.Mutex + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // QUICConfig is the quic.Config used for dialing new connections. + // If nil, reasonable default values will be used. + QUICConfig *quic.Config + + // Dial specifies an optional dial function for creating QUIC + // connections for requests. + // If Dial is nil, a UDPConn will be created at the first request + // and will be reused for subsequent connections to other servers. + Dial func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) + + // Enable support for HTTP/3 datagrams (RFC 9297). + // If a QUICConfig is set, datagram support also needs to be enabled on the QUIC layer by setting EnableDatagrams. + EnableDatagrams bool + + // Additional HTTP/3 settings. + // It is invalid to specify any settings defined by RFC 9114 (HTTP/3) and RFC 9297 (HTTP Datagrams). 
+ AdditionalSettings map[uint64]uint64 + + initOnce sync.Once + initErr error + + newClient func(quic.EarlyConnection) singleRoundTripper + + clients map[string]*roundTripperWithCount + transport *quic.Transport +} + +var ( + _ http.RoundTripper = &RoundTripper{} + _ io.Closer = &RoundTripper{} +) + +// ErrNoCachedConn is returned when RoundTripper.OnlyCachedConn is set +var ErrNoCachedConn = errors.New("http3: no cached connection was available") + +// RoundTripOpt is like RoundTrip, but takes options. +func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + r.initOnce.Do(func() { r.initErr = r.init() }) + if r.initErr != nil { + return nil, r.initErr + } + + if req.URL == nil { + closeRequestBody(req) + return nil, errors.New("http3: nil Request.URL") + } + if req.URL.Scheme != "https" { + closeRequestBody(req) + return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme) + } + if req.URL.Host == "" { + closeRequestBody(req) + return nil, errors.New("http3: no Host in request URL") + } + if req.Header == nil { + closeRequestBody(req) + return nil, errors.New("http3: nil Request.Header") + } + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("http3: invalid http header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k) + } + } + } + + if req.Method != "" && !validMethod(req.Method) { + closeRequestBody(req) + return nil, fmt.Errorf("http3: invalid method %q", req.Method) + } + + hostname := authorityAddr(hostnameFromURL(req.URL)) + cl, isReused, err := r.getClient(req.Context(), hostname, opt.OnlyCachedConn) + if err != ErrNoCachedConn { + if debugf := r.Debugf; debugf != nil { + debugf("HTTP/3 %s %s", req.Method, req.URL.String()) + } + } + if err != nil { + return nil, err + } + + select { + case <-cl.dialing: + case <-req.Context().Done(): + return nil, context.Cause(req.Context()) + } + + if cl.dialErr != nil { + r.removeClient(hostname) + return nil, cl.dialErr + } + defer cl.useCount.Add(-1) + rsp, err := cl.rt.RoundTrip(req) + if err != nil { + // non-nil errors on roundtrip are likely due to a problem with the connection + // so we remove the client from the cache so that subsequent trips reconnect + // context cancelation is excluded as is does not signify a connection error + if !errors.Is(err, context.Canceled) { + r.removeClient(hostname) + } + + if isReused { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return r.RoundTripOpt(req, opt) + } + } + } + return rsp, err +} + +// RoundTrip does a round trip. 
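The connection cache consulted by RoundTripOpt above is keyed by the authority normalised with authorityAddr, so URLs that differ only in an implicit default port end up on the same QUIC connection. A minimal sketch; the hostnames and the helper name are illustrative only.

```go
package http3

import "fmt"

// exampleCacheKeys prints the normalised authorities that key the
// RoundTripper's per-host connection cache (illustrative hostnames only).
func exampleCacheKeys() {
	fmt.Println(authorityAddr("example.com"))      // example.com:443
	fmt.Println(authorityAddr("example.com:8443")) // example.com:8443
	fmt.Println(authorityAddr("[::1]"))            // [::1]:443
}
```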
+func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return r.RoundTripOpt(req, RoundTripOpt{}) +} + +func (r *RoundTripper) init() error { + if r.newClient == nil { + r.newClient = func(conn quic.EarlyConnection) singleRoundTripper { + return &SingleDestinationRoundTripper{ + Options: r.Options, + Connection: conn, + EnableDatagrams: r.EnableDatagrams, + AdditionalSettings: r.AdditionalSettings, + } + } + } + if r.QUICConfig == nil { + r.QUICConfig = defaultQuicConfig.Clone() + r.QUICConfig.EnableDatagrams = r.EnableDatagrams + } + if r.EnableDatagrams && !r.QUICConfig.EnableDatagrams { + return errors.New("HTTP Datagrams enabled, but QUIC Datagrams disabled") + } + if len(r.QUICConfig.Versions) == 0 { + r.QUICConfig = r.QUICConfig.Clone() + r.QUICConfig.Versions = []quic.Version{SupportedVersions[0]} + } + if len(r.QUICConfig.Versions) != 1 { + return errors.New("can only use a single QUIC version for dialing a HTTP/3 connection") + } + if r.QUICConfig.MaxIncomingStreams == 0 { + r.QUICConfig.MaxIncomingStreams = -1 // don't allow any bidirectional streams + } + return nil +} + +// RoundTripOnlyCachedConn round trip only cached conn. +func (r *RoundTripper) RoundTripOnlyCachedConn(req *http.Request) (*http.Response, error) { + return r.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true}) +} + +// AddConn add a http3 connection, dial new conn if not exists. +func (r *RoundTripper) AddConn(ctx context.Context, addr string) error { + addr = authorityAddr(addr) + cl, _, err := r.getClient(ctx, addr, false) + if err == nil { + cl.useCount.Add(-1) + } + return err +} + +func (r *RoundTripper) getClient(ctx context.Context, hostname string, onlyCached bool) (rtc *roundTripperWithCount, isReused bool, err error) { + r.mutex.Lock() + defer r.mutex.Unlock() + + if r.clients == nil { + r.clients = make(map[string]*roundTripperWithCount) + } + + cl, ok := r.clients[hostname] + if !ok { + if onlyCached { + return nil, false, ErrNoCachedConn + } + ctx, cancel := context.WithCancel(ctx) + cl = &roundTripperWithCount{ + dialing: make(chan struct{}), + cancel: cancel, + } + go func() { + defer close(cl.dialing) + defer cancel() + conn, rt, err := r.dial(ctx, hostname) + if err != nil { + cl.dialErr = err + return + } + cl.conn = conn + cl.rt = rt + }() + r.clients[hostname] = cl + } + select { + case <-cl.dialing: + if cl.dialErr != nil { + delete(r.clients, hostname) + return nil, false, cl.dialErr + } + select { + case <-cl.conn.HandshakeComplete(): + isReused = true + default: + } + default: + } + cl.useCount.Add(1) + return cl, isReused, nil +} + +func (r *RoundTripper) dial(ctx context.Context, hostname string) (quic.EarlyConnection, singleRoundTripper, error) { + var tlsConf *tls.Config + if r.TLSClientConfig == nil { + tlsConf = &tls.Config{} + } else { + tlsConf = r.TLSClientConfig.Clone() + } + if tlsConf.ServerName == "" { + sni, _, err := net.SplitHostPort(hostname) + if err != nil { + // It's ok if net.SplitHostPort returns an error - it could be a hostname/IP address without a port. 
+ sni = hostname + } + tlsConf.ServerName = sni + } + // Replace existing ALPNs by H3 + tlsConf.NextProtos = []string{versionToALPN(r.QUICConfig.Versions[0])} + + dial := r.Dial + if dial == nil { + if r.transport == nil { + udpConn, err := net.ListenUDP("udp", nil) + if err != nil { + return nil, nil, err + } + r.transport = &quic.Transport{Conn: udpConn} + } + dial = func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) { + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + return r.transport.DialEarly(ctx, udpAddr, tlsCfg, cfg) + } + } + + conn, err := dial(ctx, hostname, tlsConf, r.QUICConfig) + if err != nil { + return nil, nil, err + } + return conn, r.newClient(conn), nil +} + +func (r *RoundTripper) removeClient(hostname string) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.clients == nil { + return + } + delete(r.clients, hostname) +} + +// Close closes the QUIC connections that this RoundTripper has used. +// It also closes the underlying UDPConn if it is not nil. +func (r *RoundTripper) Close() error { + r.mutex.Lock() + defer r.mutex.Unlock() + for _, cl := range r.clients { + if err := cl.Close(); err != nil { + return err + } + } + r.clients = nil + if r.transport != nil { + if err := r.transport.Close(); err != nil { + return err + } + if err := r.transport.Conn.Close(); err != nil { + return err + } + r.transport = nil + } + return nil +} + +func closeRequestBody(req *http.Request) { + if req.Body != nil { + req.Body.Close() + } +} + +func validMethod(method string) bool { + /* + Method = "OPTIONS" ; Section 9.2 + | "GET" ; Section 9.3 + | "HEAD" ; Section 9.4 + | "POST" ; Section 9.5 + | "PUT" ; Section 9.6 + | "DELETE" ; Section 9.7 + | "TRACE" ; Section 9.8 + | "CONNECT" ; Section 9.9 + | extension-method + extension-method = token + token = 1* + */ + return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1 +} + +// copied from net/http/http.go +func isNotToken(r rune) bool { + return !httpguts.IsTokenRune(r) +} + +func (r *RoundTripper) CloseIdleConnections() { + r.mutex.Lock() + defer r.mutex.Unlock() + for hostname, cl := range r.clients { + if cl.useCount.Load() == 0 { + cl.Close() + delete(r.clients, hostname) + } + } +} diff --git a/internal/http3/server.go b/internal/http3/server.go new file mode 100644 index 00000000..4c36f0f0 --- /dev/null +++ b/internal/http3/server.go @@ -0,0 +1,26 @@ +package http3 + +import "github.com/quic-go/quic-go" + +// NextProtoH3 is the ALPN protocol negotiated during the TLS handshake, for QUIC v1 and v2. +const NextProtoH3 = "h3" + +// StreamType is the stream type of a unidirectional stream. +type ServerStreamType uint64 + +const ( + streamTypeControlStream = 0 + streamTypePushStream = 1 + streamTypeQPACKEncoderStream = 2 + streamTypeQPACKDecoderStream = 3 +) + +func versionToALPN(v quic.Version) string { + //nolint:exhaustive // These are all the versions we care about. 
+ switch v { + case Version1, Version2: + return NextProtoH3 + default: + return "" + } +} diff --git a/internal/http3/state_tracking_stream.go b/internal/http3/state_tracking_stream.go new file mode 100644 index 00000000..9cf17f5e --- /dev/null +++ b/internal/http3/state_tracking_stream.go @@ -0,0 +1,116 @@ +package http3 + +import ( + "context" + "errors" + "os" + "sync" + + "github.com/quic-go/quic-go" +) + +var _ quic.Stream = &stateTrackingStream{} + +// stateTrackingStream is an implementation of quic.Stream that delegates +// to an underlying stream +// it takes care of proxying send and receive errors onto an implementation of +// the errorSetter interface (intended to be occupied by a datagrammer) +// it is also responsible for clearing the stream based on its ID from its +// parent connection, this is done through the streamClearer interface when +// both the send and receive sides are closed +type stateTrackingStream struct { + quic.Stream + + mx sync.Mutex + sendErr error + recvErr error + + clearer streamClearer + setter errorSetter +} + +type streamClearer interface { + clearStream(quic.StreamID) +} + +type errorSetter interface { + SetSendError(error) + SetReceiveError(error) +} + +func newStateTrackingStream(s quic.Stream, clearer streamClearer, setter errorSetter) *stateTrackingStream { + t := &stateTrackingStream{ + Stream: s, + clearer: clearer, + setter: setter, + } + + context.AfterFunc(s.Context(), func() { + t.closeSend(context.Cause(s.Context())) + }) + + return t +} + +func (s *stateTrackingStream) closeSend(e error) { + s.mx.Lock() + defer s.mx.Unlock() + + // clear the stream the first time both the send + // and receive are finished + if s.sendErr == nil { + if s.recvErr != nil { + s.clearer.clearStream(s.StreamID()) + } + + s.setter.SetSendError(e) + s.sendErr = e + } +} + +func (s *stateTrackingStream) closeReceive(e error) { + s.mx.Lock() + defer s.mx.Unlock() + + // clear the stream the first time both the send + // and receive are finished + if s.recvErr == nil { + if s.sendErr != nil { + s.clearer.clearStream(s.StreamID()) + } + + s.setter.SetReceiveError(e) + s.recvErr = e + } +} + +func (s *stateTrackingStream) Close() error { + s.closeSend(errors.New("write on closed stream")) + return s.Stream.Close() +} + +func (s *stateTrackingStream) CancelWrite(e quic.StreamErrorCode) { + s.closeSend(&quic.StreamError{StreamID: s.Stream.StreamID(), ErrorCode: e}) + s.Stream.CancelWrite(e) +} + +func (s *stateTrackingStream) Write(b []byte) (int, error) { + n, err := s.Stream.Write(b) + if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) { + s.closeSend(err) + } + return n, err +} + +func (s *stateTrackingStream) CancelRead(e quic.StreamErrorCode) { + s.closeReceive(&quic.StreamError{StreamID: s.Stream.StreamID(), ErrorCode: e}) + s.Stream.CancelRead(e) +} + +func (s *stateTrackingStream) Read(b []byte) (int, error) { + n, err := s.Stream.Read(b) + if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) { + s.closeReceive(err) + } + return n, err +} diff --git a/internal/http3/trace.go b/internal/http3/trace.go new file mode 100644 index 00000000..710072de --- /dev/null +++ b/internal/http3/trace.go @@ -0,0 +1,7 @@ +package http3 + +import "net/http/httptrace" + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} diff --git a/internal/netutil/addr.go b/internal/netutil/addr.go new file mode 100644 index 00000000..d5dfc430 --- /dev/null +++ b/internal/netutil/addr.go @@ -0,0 +1,39 @@ 
+package netutil + +import ( + "golang.org/x/net/idna" + "net" + "net/url" + "strings" +) + +func AuthorityKey(u *url.URL) string { + return u.Scheme + "://" + AuthorityAddr(u.Scheme, u.Host) +} + +// AuthorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func AuthorityAddr(scheme, authority string) (addr string) { + host, port := AuthorityHostPort(scheme, authority) + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + addr = net.JoinHostPort(host, port) + return +} + +func AuthorityHostPort(scheme, authority string) (host, port string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + return +} diff --git a/internal/quic-go/quicvarint/io.go b/internal/quic-go/quicvarint/io.go new file mode 100644 index 00000000..9368d1c5 --- /dev/null +++ b/internal/quic-go/quicvarint/io.go @@ -0,0 +1,68 @@ +package quicvarint + +import ( + "bytes" + "io" +) + +// Reader implements both the io.ByteReader and io.Reader interfaces. +type Reader interface { + io.ByteReader + io.Reader +} + +var _ Reader = &bytes.Reader{} + +type byteReader struct { + io.Reader +} + +var _ Reader = &byteReader{} + +// NewReader returns a Reader for r. +// If r already implements both io.ByteReader and io.Reader, NewReader returns r. +// Otherwise, r is wrapped to add the missing interfaces. +func NewReader(r io.Reader) Reader { + if r, ok := r.(Reader); ok { + return r + } + return &byteReader{r} +} + +func (r *byteReader) ReadByte() (byte, error) { + var b [1]byte + n, err := r.Reader.Read(b[:]) + if n == 1 && err == io.EOF { + err = nil + } + return b[0], err +} + +// Writer implements both the io.ByteWriter and io.Writer interfaces. +type Writer interface { + io.ByteWriter + io.Writer +} + +var _ Writer = &bytes.Buffer{} + +type byteWriter struct { + io.Writer +} + +var _ Writer = &byteWriter{} + +// NewWriter returns a Writer for w. +// If r already implements both io.ByteWriter and io.Writer, NewWriter returns w. +// Otherwise, w is wrapped to add the missing interfaces. +func NewWriter(w io.Writer) Writer { + if w, ok := w.(Writer); ok { + return w + } + return &byteWriter{w} +} + +func (w *byteWriter) WriteByte(c byte) error { + _, err := w.Writer.Write([]byte{c}) + return err +} diff --git a/internal/quic-go/quicvarint/varint.go b/internal/quic-go/quicvarint/varint.go new file mode 100644 index 00000000..9a22e334 --- /dev/null +++ b/internal/quic-go/quicvarint/varint.go @@ -0,0 +1,164 @@ +package quicvarint + +import ( + "fmt" + "io" +) + +// taken from the QUIC draft +const ( + // Min is the minimum value allowed for a QUIC varint. + Min = 0 + + // Max is the maximum allowed value for a QUIC varint (2^62-1). + Max = maxVarInt8 + + maxVarInt1 = 63 + maxVarInt2 = 16383 + maxVarInt4 = 1073741823 + maxVarInt8 = 4611686018427387903 +) + +// Read reads a number in the QUIC varint format from r. 
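The variable-length integer format implemented by Read, Parse and Append below stores the encoded length in the top two bits of the first byte (00, 01, 10 and 11 for 1, 2, 4 and 8 bytes) and the value itself in the remaining bits, big-endian. A short sketch using the sample values from RFC 9000, appendix A.1; the function name is illustrative only.

```go
package quicvarint

import "fmt"

// exampleVarint encodes and decodes the sample values from RFC 9000,
// appendix A.1, to show the two-bit length prefix.
func exampleVarint() {
	fmt.Printf("% x\n", Append(nil, 37))        // 25          (1 byte,  prefix 0b00)
	fmt.Printf("% x\n", Append(nil, 15293))     // 7b bd       (2 bytes, prefix 0b01)
	fmt.Printf("% x\n", Append(nil, 494878333)) // 9d 7f 3e 7d (4 bytes, prefix 0b10)

	v, n, _ := Parse([]byte{0x7b, 0xbd})
	fmt.Println(v, n) // 15293 2
}
```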
+func Read(r io.ByteReader) (uint64, error) { + firstByte, err := r.ReadByte() + if err != nil { + return 0, err + } + // the first two bits of the first byte encode the length + l := 1 << ((firstByte & 0xc0) >> 6) + b1 := firstByte & (0xff - 0xc0) + if l == 1 { + return uint64(b1), nil + } + b2, err := r.ReadByte() + if err != nil { + return 0, err + } + if l == 2 { + return uint64(b2) + uint64(b1)<<8, nil + } + b3, err := r.ReadByte() + if err != nil { + return 0, err + } + b4, err := r.ReadByte() + if err != nil { + return 0, err + } + if l == 4 { + return uint64(b4) + uint64(b3)<<8 + uint64(b2)<<16 + uint64(b1)<<24, nil + } + b5, err := r.ReadByte() + if err != nil { + return 0, err + } + b6, err := r.ReadByte() + if err != nil { + return 0, err + } + b7, err := r.ReadByte() + if err != nil { + return 0, err + } + b8, err := r.ReadByte() + if err != nil { + return 0, err + } + return uint64(b8) + uint64(b7)<<8 + uint64(b6)<<16 + uint64(b5)<<24 + uint64(b4)<<32 + uint64(b3)<<40 + uint64(b2)<<48 + uint64(b1)<<56, nil +} + +// Parse reads a number in the QUIC varint format. +// It returns the number of bytes consumed. +func Parse(b []byte) (uint64 /* value */, int /* bytes consumed */, error) { + if len(b) == 0 { + return 0, 0, io.EOF + } + firstByte := b[0] + // the first two bits of the first byte encode the length + l := 1 << ((firstByte & 0xc0) >> 6) + if len(b) < l { + return 0, 0, io.ErrUnexpectedEOF + } + b0 := firstByte & (0xff - 0xc0) + if l == 1 { + return uint64(b0), 1, nil + } + if l == 2 { + return uint64(b[1]) + uint64(b0)<<8, 2, nil + } + if l == 4 { + return uint64(b[3]) + uint64(b[2])<<8 + uint64(b[1])<<16 + uint64(b0)<<24, 4, nil + } + return uint64(b[7]) + uint64(b[6])<<8 + uint64(b[5])<<16 + uint64(b[4])<<24 + uint64(b[3])<<32 + uint64(b[2])<<40 + uint64(b[1])<<48 + uint64(b0)<<56, 8, nil +} + +// Append appends i in the QUIC varint format. +func Append(b []byte, i uint64) []byte { + if i <= maxVarInt1 { + return append(b, uint8(i)) + } + if i <= maxVarInt2 { + return append(b, []byte{uint8(i>>8) | 0x40, uint8(i)}...) + } + if i <= maxVarInt4 { + return append(b, []byte{uint8(i>>24) | 0x80, uint8(i >> 16), uint8(i >> 8), uint8(i)}...) + } + if i <= maxVarInt8 { + return append(b, []byte{ + uint8(i>>56) | 0xc0, uint8(i >> 48), uint8(i >> 40), uint8(i >> 32), + uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i), + }...) + } + panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) +} + +// AppendWithLen append i in the QUIC varint format with the desired length. +func AppendWithLen(b []byte, i uint64, length int) []byte { + if length != 1 && length != 2 && length != 4 && length != 8 { + panic("invalid varint length") + } + l := Len(i) + if l == length { + return Append(b, i) + } + if l > length { + panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length)) + } + if length == 2 { + b = append(b, 0b01000000) + } else if length == 4 { + b = append(b, 0b10000000) + } else if length == 8 { + b = append(b, 0b11000000) + } + for j := 1; j < length-l; j++ { + b = append(b, 0) + } + for j := 0; j < l; j++ { + b = append(b, uint8(i>>(8*(l-1-j)))) + } + return b +} + +// Len determines the number of bytes that will be needed to write the number i. +func Len(i uint64) int { + if i <= maxVarInt1 { + return 1 + } + if i <= maxVarInt2 { + return 2 + } + if i <= maxVarInt4 { + return 4 + } + if i <= maxVarInt8 { + return 8 + } + // Don't use a fmt.Sprintf here to format the error message. + // The function would then exceed the inlining budget. 
+ panic(struct { + message string + num uint64 + }{"value doesn't fit into 62 bits: ", i}) +} diff --git a/internal/socks/client.go b/internal/socks/client.go new file mode 100644 index 00000000..3d6f516a --- /dev/null +++ b/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/internal/socks/dial_test.go b/internal/socks/dial_test.go new file mode 100644 index 00000000..7a10a57d --- /dev/null +++ b/internal/socks/dial_test.go @@ -0,0 +1,395 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "golang.org/x/net/nettest" + "io" + "math/rand" + "net" + "os" + "testing" + "time" +) + +// An AuthRequest represents an authentication request. +type AuthRequest struct { + Version int + Methods []AuthMethod +} + +// ParseAuthRequest parses an authentication request. +func ParseAuthRequest(b []byte) (*AuthRequest, error) { + if len(b) < 2 { + return nil, errors.New("short auth request") + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version") + } + if len(b)-2 < int(b[1]) { + return nil, errors.New("short auth request") + } + req := &AuthRequest{Version: int(b[0])} + if b[1] > 0 { + req.Methods = make([]AuthMethod, b[1]) + for i, m := range b[2 : 2+b[1]] { + req.Methods[i] = AuthMethod(m) + } + } + return req, nil +} + +// MarshalAuthReply returns an authentication reply in wire format. +func MarshalAuthReply(ver int, m AuthMethod) ([]byte, error) { + return []byte{byte(ver), byte(m)}, nil +} + +// A CmdRequest repesents a command request. +type CmdRequest struct { + Version int + Cmd Command + Addr Addr +} + +// ParseCmdRequest parses a command request. 
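The command request that connect builds above (and that ParseCmdRequest below decodes for the tests) follows RFC 1928, section 4: VER, CMD, RSV, ATYP, DST.ADDR, DST.PORT. A minimal sketch for an FQDN target; the host, port and helper name are illustrative only.

```go
package socks

import "fmt"

// exampleConnectRequest builds the CONNECT request bytes for the target
// "example.com:443" using an FQDN address, mirroring what connect sends.
func exampleConnectRequest() []byte {
	host, port := "example.com", 443
	b := []byte{Version5, byte(CmdConnect), 0x00, AddrTypeFQDN, byte(len(host))}
	b = append(b, host...)
	b = append(b, byte(port>>8), byte(port))
	// Prints: 05 01 00 03 0b 65 78 61 6d 70 6c 65 2e 63 6f 6d 01 bb
	fmt.Printf("% x\n", b)
	return b
}
```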
+func ParseCmdRequest(b []byte) (*CmdRequest, error) { + if len(b) < 7 { + return nil, errors.New("short cmd request") + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version") + } + if Command(b[1]) != CmdConnect { + return nil, errors.New("unexpected command") + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + req := &CmdRequest{Version: int(b[0]), Cmd: Command(b[1])} + l := 2 + off := 4 + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + req.Addr.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + req.Addr.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + l += int(b[4]) + off = 5 + default: + return nil, errors.New("unknown address type") + } + if len(b[off:]) < l { + return nil, errors.New("short cmd request") + } + if req.Addr.IP != nil { + copy(req.Addr.IP, b[off:]) + } else { + req.Addr.Name = string(b[off : off+l-2]) + } + req.Addr.Port = int(b[off+l-2])<<8 | int(b[off+l-1]) + return req, nil +} + +// MarshalCmdReply returns a command reply in wire format. +func MarshalCmdReply(ver int, reply Reply, a *Addr) ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(ver) + b[1] = byte(reply) + if a.Name != "" { + if len(a.Name) > 255 { + return nil, errors.New("fqdn too long") + } + b[3] = AddrTypeFQDN + b = append(b, byte(len(a.Name))) + b = append(b, a.Name...) + } else if ip4 := a.IP.To4(); ip4 != nil { + b[3] = AddrTypeIPv4 + b = append(b, ip4...) + } else if ip6 := a.IP.To16(); ip6 != nil { + b[3] = AddrTypeIPv6 + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + b = append(b, byte(a.Port>>8), byte(a.Port)) + return b, nil +} + +// A Server repesents a server for handshake testing. +type Server struct { + ln net.Listener +} + +// Addr rerurns a server address. +func (s *Server) Addr() net.Addr { + return s.ln.Addr() +} + +// TargetAddr returns a fake final destination address. +// +// The returned address is only valid for testing with Server. +func (s *Server) TargetAddr() net.Addr { + a := s.ln.Addr() + switch a := a.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 5963} + } + if a.IP.To16() != nil && a.IP.To4() == nil { + return &net.TCPAddr{IP: net.IPv6loopback, Port: 5963} + } + } + return nil +} + +// Close closes the server. +func (s *Server) Close() error { + return s.ln.Close() +} + +func (s *Server) serve(authFunc, cmdFunc func(io.ReadWriter, []byte) error) { + c, err := s.ln.Accept() + if err != nil { + return + } + defer c.Close() + go s.serve(authFunc, cmdFunc) + b := make([]byte, 512) + n, err := c.Read(b) + if err != nil { + return + } + if err := authFunc(c, b[:n]); err != nil { + return + } + n, err = c.Read(b) + if err != nil { + return + } + if err := cmdFunc(c, b[:n]); err != nil { + return + } +} + +// NewServer returns a new server. +// +// The provided authFunc and cmdFunc must parse requests and return +// appropriate replies to clients. +func NewServer(authFunc, cmdFunc func(io.ReadWriter, []byte) error) (*Server, error) { + var err error + s := new(Server) + s.ln, err = nettest.NewLocalListener("tcp") + if err != nil { + return nil, err + } + go s.serve(authFunc, cmdFunc) + return s, nil +} + +// NoAuthRequired handles a no-authentication-required signaling. 
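The method-selection exchange handled by NoAuthRequired just below is the first round trip of the protocol (RFC 1928, section 3): the client offers the methods it supports and the server picks one. A small sketch of the raw bytes; the offered methods mirror what TestDial configures, and the helper name is illustrative only.

```go
package socks

import "fmt"

// exampleMethodSelection shows the SOCKS5 method-selection round trip.
func exampleMethodSelection() {
	// client greeting: VER, NMETHODS, METHODS...
	greeting := []byte{Version5, 0x02, byte(AuthMethodNotRequired), byte(AuthMethodUsernamePassword)}
	fmt.Printf("client: % x\n", greeting) // client: 05 02 00 02

	// server choice: VER, METHOD (here: no authentication required)
	choice := []byte{Version5, byte(AuthMethodNotRequired)}
	fmt.Printf("server: % x\n", choice) // server: 05 00
}
```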
+func NoAuthRequired(rw io.ReadWriter, b []byte) error { + req, err := ParseAuthRequest(b) + if err != nil { + return err + } + b, err = MarshalAuthReply(req.Version, AuthMethodNotRequired) + if err != nil { + return err + } + n, err := rw.Write(b) + if err != nil { + return err + } + if n != len(b) { + return errors.New("short write") + } + return nil +} + +// NoProxyRequired handles a command signaling without constructing a +// proxy connection to the final destination. +func NoProxyRequired(rw io.ReadWriter, b []byte) error { + req, err := ParseCmdRequest(b) + if err != nil { + return err + } + req.Addr.Port += 1 + if req.Addr.Name != "" { + req.Addr.Name = "boundaddr.doesnotexist" + } else if req.Addr.IP.To4() != nil { + req.Addr.IP = net.IPv4(127, 0, 0, 1) + } else { + req.Addr.IP = net.IPv6loopback + } + b, err = MarshalCmdReply(Version5, StatusSucceeded, &req.Addr) + if err != nil { + return err + } + n, err := rw.Write(b) + if err != nil { + return err + } + if n != len(b) { + return errors.New("short write") + } + return nil +} + +func TestDial(t *testing.T) { + t.Run("Connect", func(t *testing.T) { + ss, err := NewServer(NoAuthRequired, NoProxyRequired) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + d := NewDialer(ss.Addr().Network(), ss.Addr().String()) + d.AuthMethods = []AuthMethod{ + AuthMethodNotRequired, + AuthMethodUsernamePassword, + } + d.Authenticate = (&UsernamePassword{ + Username: "username", + Password: "password", + }).Authenticate + c, err := d.DialContext(context.Background(), ss.TargetAddr().Network(), ss.TargetAddr().String()) + if err != nil { + t.Fatal(err) + } + c.(*Conn).BoundAddr() + c.Close() + }) + t.Run("ConnectWithConn", func(t *testing.T) { + ss, err := NewServer(NoAuthRequired, NoProxyRequired) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + c, err := net.Dial(ss.Addr().Network(), ss.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + d := NewDialer(ss.Addr().Network(), ss.Addr().String()) + d.AuthMethods = []AuthMethod{ + AuthMethodNotRequired, + AuthMethodUsernamePassword, + } + d.Authenticate = (&UsernamePassword{ + Username: "username", + Password: "password", + }).Authenticate + a, err := d.DialWithConn(context.Background(), c, ss.TargetAddr().Network(), ss.TargetAddr().String()) + if err != nil { + t.Fatal(err) + } + if _, ok := a.(*Addr); !ok { + t.Fatalf("got %+v; want Addr", a) + } + }) + t.Run("Cancel", func(t *testing.T) { + ss, err := NewServer(NoAuthRequired, blackholeCmdFunc) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + d := NewDialer(ss.Addr().Network(), ss.Addr().String()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dialErr := make(chan error) + go func() { + c, err := d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) + if err == nil { + c.Close() + } + dialErr <- err + }() + time.Sleep(100 * time.Millisecond) + cancel() + err = <-dialErr + if perr, nerr := parseDialError(err); perr != context.Canceled && nerr == nil { + t.Fatalf("got %v; want context.Canceled or equivalent", err) + } + }) + t.Run("Deadline", func(t *testing.T) { + ss, err := NewServer(NoAuthRequired, blackholeCmdFunc) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + d := NewDialer(ss.Addr().Network(), ss.Addr().String()) + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) + defer cancel() + c, err := d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) + if err == nil { + c.Close() + 
} + if perr, nerr := parseDialError(err); perr != context.DeadlineExceeded && nerr == nil { + t.Fatalf("got %v; want context.DeadlineExceeded or equivalent", err) + } + }) + t.Run("WithRogueServer", func(t *testing.T) { + ss, err := NewServer(NoAuthRequired, rogueCmdFunc) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + d := NewDialer(ss.Addr().Network(), ss.Addr().String()) + for i := 0; i < 2*len(rogueCmdList); i++ { + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) + defer cancel() + c, err := d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) + if err == nil { + t.Log(c.(*Conn).BoundAddr()) + c.Close() + t.Error("should fail") + } + } + }) +} + +func blackholeCmdFunc(rw io.ReadWriter, b []byte) error { + if _, err := ParseCmdRequest(b); err != nil { + return err + } + var bb [1]byte + for { + if _, err := rw.Read(bb[:]); err != nil { + return err + } + } +} + +func rogueCmdFunc(rw io.ReadWriter, b []byte) error { + if _, err := ParseCmdRequest(b); err != nil { + return err + } + rw.Write(rogueCmdList[rand.Intn(len(rogueCmdList))]) + return nil +} + +var rogueCmdList = [][]byte{ + {0x05}, + {0x06, 0x00, 0x00, 0x01, 192, 0, 2, 1, 0x17, 0x4b}, + {0x05, 0x00, 0xff, 0x01, 192, 0, 2, 2, 0x17, 0x4b}, + {0x05, 0x00, 0x00, 0x01, 192, 0, 2, 3}, + {0x05, 0x00, 0x00, 0x03, 0x04, 'F', 'Q', 'D', 'N'}, +} + +func parseDialError(err error) (perr, nerr error) { + if e, ok := err.(*net.OpError); ok { + err = e.Err + nerr = e + } + if e, ok := err.(*os.SyscallError); ok { + err = e.Err + } + perr = err + return +} diff --git a/internal/socks/socks.go b/internal/socks/socks.go new file mode 100644 index 00000000..cddef90f --- /dev/null +++ b/internal/socks/socks.go @@ -0,0 +1,289 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
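As a quick reference, the Reply type above turns the status byte of a server reply into the RFC 1928 reason strings, and anything unassigned falls back to a numeric form. A tiny sketch; the helper name is illustrative only.

```go
package socks

import "fmt"

// exampleReplyStrings decodes a few SOCKS5 reply status bytes.
func exampleReplyStrings() {
	fmt.Println(Reply(0x00)) // succeeded
	fmt.Println(Reply(0x05)) // connection refused
	fmt.Println(Reply(0x09)) // unknown code: 9
}
```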
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +// Network return "socks" +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
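The DialContext implementation follows. Because failures are wrapped in a *net.OpError as described above, callers can recover the proxy and target addresses from a failed dial. A minimal sketch, assuming code living inside this module (the internal import path is not usable from outside) and a placeholder proxy address:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/imroc/req/v3/internal/socks"
)

func main() {
	// Placeholder proxy address; any unreachable endpoint works for the demo.
	d := socks.NewDialer("tcp", "198.51.100.1:1080")

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	_, err := d.DialContext(ctx, "tcp", "example.com:443")
	var opErr *net.OpError
	if errors.As(err, &opErr) {
		// Op is the SOCKS command (e.g. "socks connect"), Source the proxy
		// address, and Addr the command target address.
		fmt.Println(opErr.Op, opErr.Source, opErr.Addr, opErr.Err)
	}
}
```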
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. 
+func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/internal/socks/socks_test.go b/internal/socks/socks_test.go new file mode 100644 index 00000000..824a09d7 --- /dev/null +++ b/internal/socks/socks_test.go @@ -0,0 +1,48 @@ +package socks + +import ( + "bytes" + "context" + "github.com/imroc/req/v3/internal/tests" + "strings" + "testing" +) + +func TestReply(t *testing.T) { + for i := 0; i < 9; i++ { + s := Reply(i).String() + if strings.Contains(s, "unknown") { + t.Errorf("resply code [%d] should not unknown", i) + } + } + s := Reply(9).String() + if !strings.Contains(s, "unknown") { + t.Errorf("resply code [%d] should unknown", 9) + } +} + +func TestAuthenticate(t *testing.T) { + auth := &UsernamePassword{ + Username: "imroc", + Password: "123456", + } + buf := bytes.NewBuffer([]byte{byte(0x01), byte(0x00)}) + err := auth.Authenticate(context.Background(), buf, AuthMethodUsernamePassword) + tests.AssertNoError(t, err) + auth.Username = "this is a very long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long long name" + err = auth.Authenticate(context.Background(), buf, AuthMethodUsernamePassword) + tests.AssertErrorContains(t, err, "invalid") + + auth.Username = "imroc" + buf = bytes.NewBuffer([]byte{byte(0x03), byte(0x00)}) + err = auth.Authenticate(context.Background(), buf, AuthMethodUsernamePassword) + tests.AssertErrorContains(t, err, "invalid username/password version") + + buf = bytes.NewBuffer([]byte{byte(0x01), byte(0x02)}) + err = auth.Authenticate(context.Background(), buf, AuthMethodUsernamePassword) + tests.AssertErrorContains(t, err, "authentication failed") + + err = auth.Authenticate(context.Background(), buf, AuthMethodNoAcceptableMethods) + tests.AssertErrorContains(t, err, "unsupported authentication method") + +} diff --git a/internal/testcert/testcert.go b/internal/testcert/testcert.go new file mode 100644 index 00000000..5f94704e --- /dev/null +++ b/internal/testcert/testcert.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testcert contains a test-only localhost certificate. 
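As a usage sketch for the certificate declared below (assumptions: in-module code, since the package is internal, and the standard net/http/httptest server as the peer), the key pair can back a throwaway TLS server while the same PEM block is trusted as a root on the client side:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"net/http/httptest"

	"github.com/imroc/req/v3/internal/testcert"
)

func main() {
	// Load the test key pair shipped with the package.
	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
	if err != nil {
		log.Fatal(err)
	}

	// Serve TLS with the test certificate.
	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
	ts.StartTLS()
	defer ts.Close()

	// Trust the same certificate on the client side.
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(testcert.LocalhostCert)
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
	}}

	resp, err := client.Get(ts.URL)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Status)
}
```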
+package testcert + +import "strings" + +// LocalhostCert is a PEM-encoded TLS cert with SAN IPs +// "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT. +// generated from src/crypto/tls: +// go run generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var LocalhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw +MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9SjY1bIw4 +iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZBl2+XsDul +rKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQABo2gwZjAO +BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw +AwEB/zAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAA +AAAAATANBgkqhkiG9w0BAQsFAAOBgQCEcetwO59EWk7WiJsG4x8SY+UIAA+flUI9 +tyC4lNhbcF2Idq9greZwbYCqTTTr2XiRNSMLCOjKyI7ukPoPjo16ocHj+P3vZGfs +h1fIw3cSS2OolhloGw/XM6RWPWtPAlGykKLciQrBru5NAPvCMsb/I1DAceTiotQM +fblo6RBxUQ== +-----END CERTIFICATE-----`) + +// LocalhostKey is the private key for LocalhostCert. +var LocalhostKey = []byte(testingKey(`-----BEGIN RSA TESTING KEY----- +MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9 +SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB +l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB +AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet +3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb +uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H +qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp +jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY +fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U +fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU +y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX +qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo +f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA== +-----END RSA TESTING KEY-----`)) + +func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") } diff --git a/internal/testdata/ca.pem b/internal/testdata/ca.pem new file mode 100644 index 00000000..67a5545e --- /dev/null +++ b/internal/testdata/ca.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICzDCCAbQCCQDA+rLymNnfJzANBgkqhkiG9w0BAQsFADAoMSYwJAYDVQQKDB1x +dWljLWdvIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMDA4MTgwOTIxMzVaFw0z +MDA4MTYwOTIxMzVaMCgxJjAkBgNVBAoMHXF1aWMtZ28gQ2VydGlmaWNhdGUgQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1OcsYrVaSDfh +iDppl6oteVspOY3yFb96T9Y/biaGPJAkBO9VGKcqwOUPmUeiWpedRAUB9LE7Srs6 +qBX4mnl90Icjp8jbIs5cPgIWLkIu8Qm549RghFzB3bn+EmCQSe4cxvyDMN3ndClp +3YMXpZgXWgJGiPOylVi/OwHDdWDBorw4hvry+6yDtpQo2TuI2A/xtxXPT7BgsEJD +WGffdgZOYXChcFA0c1XVLIYlu2w2JhxS8c2TUF6uSDlmcoONNKVoiNCuu1Z9MorS +Qmg7a2G7dSPu123KcTcSQFcmJrt+1G81gOBtHB69kacD8xDmgksj09h/ODPL/gIU +1ZcU2ci1/QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB0Tb1JbLXp/BvWovSAhO/j +wG7UEaUA1rCtkDB+fV2HS9bxCbV5eErdg8AMHKgB51ygUrq95vm/baZmUILr84XK +uTEoxxrw5S9Z7SrhtbOpKCumoSeTsCPjDvCcwFExHv4XHFk+CPqZwbMHueVIMT0+ +nGWss/KecCPdJLdnUgMRz0tIuXzkoRuOiUiZfUeyBNVNbDFSrLigYshTeAPGaYjX +CypoHxkeS93nWfOMUu8FTYLYkvGMU5i076zDoFGKJiEtbjSiNW+Hei7u2aSEuCzp +qyTKzYPWYffAq3MM2MKJgZdL04e9GEGeuce/qhM1o3q77aI/XJImwEDdut2LDec1 +-----END CERTIFICATE----- diff --git a/internal/testdata/cert.go b/internal/testdata/cert.go new 
file mode 100644
index 00000000..bc1a8f44
--- /dev/null
+++ b/internal/testdata/cert.go
@@ -0,0 +1,55 @@
+package testdata
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"os"
+	"path"
+	"runtime"
+)
+
+var certPath string
+
+func init() {
+	_, filename, _, ok := runtime.Caller(0)
+	if !ok {
+		panic("Failed to get current frame")
+	}
+
+	certPath = path.Dir(filename)
+}
+
+// GetCertificatePaths returns the paths to certificate and key
+func GetCertificatePaths() (string, string) {
+	return path.Join(certPath, "cert.pem"), path.Join(certPath, "priv.key")
+}
+
+// GetTLSConfig returns a tls config for the localhost test certificate
+func GetTLSConfig() *tls.Config {
+	cert, err := tls.LoadX509KeyPair(GetCertificatePaths())
+	if err != nil {
+		panic(err)
+	}
+	return &tls.Config{
+		Certificates: []tls.Certificate{cert},
+	}
+}
+
+// AddRootCA adds the root CA certificate to a cert pool
+func AddRootCA(certPool *x509.CertPool) {
+	caCertPath := path.Join(certPath, "ca.pem")
+	caCertRaw, err := os.ReadFile(caCertPath)
+	if err != nil {
+		panic(err)
+	}
+	if ok := certPool.AppendCertsFromPEM(caCertRaw); !ok {
+		panic("Could not add root certificate to pool.")
+	}
+}
+
+// GetRootCA returns an x509.CertPool containing (only) the CA certificate
+func GetRootCA() *x509.CertPool {
+	pool := x509.NewCertPool()
+	AddRootCA(pool)
+	return pool
+}
diff --git a/internal/testdata/cert.pem b/internal/testdata/cert.pem
new file mode 100644
index 00000000..91d1aa9e
--- /dev/null
+++ b/internal/testdata/cert.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC1TCCAb2gAwIBAgIJAK2fcqC0BVA7MA0GCSqGSIb3DQEBCwUAMCgxJjAkBgNV
+BAoMHXF1aWMtZ28gQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTIwMDgxODA5MjEz
+NVoXDTMwMDgxNjA5MjEzNVowEjEQMA4GA1UECgwHcXVpYy1nbzCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAN/YwrigSXdJCL/bdBGhb0UpqtU8H+krV870
++w1yCSykLImH8x3qHZEXt9sr/vgjcJoV6Z15RZmnbEqnAx84sIClIBoIgnk0VPxu
+WF+/U/dElbftCfYcfJAddhRckdmGB+yb3Wogb32UJ+q3my++h6NjHsYb+OwpJPnQ
+meXjOE7Kkf+bXfFywHF3R8kzVdh5JUFYeKbxYmYgxRps1YTsbCrZCrSy1CbQ9FJw
+Wg5C8t+7yvVFmOeWPECypBCz2xS2mu+kycMNIjIWMl0SL7oVM5cBkRKPeVIG/KcM
+i5+/4lRSLoPh0Txh2TKBWfpzLbIOdPU8/O7cAukIGWx0XsfHUQMCAwEAAaMYMBYw
+FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAyxxvebdMz
+shp5pt1SxMOSXbo8sTa1cpaf2rTmb4nxjXs6KPBEn53hSBz9bhe5wXE4f94SHadf
+636rLh3d75KgrLUwO9Yq0HfCxMo1jUV/Ug++XwcHCI9vk58Tk/H4hqEM6C8RrdTj
+fYeuegQ0/oNLJ4uTw2P2A8TJbL6FC2dcICEAvUGZUcVyZ8m8tHXNRYYh6MZ7ubCh
+hinvL+AA5fY6EVlc5G/P4DN6fYxGn1cFNbiL4uZP4+W3dOmP+NV0YV9ihTyMzz0R
+vSoOZ9FeVkyw8EhMb3LoyXYKazvJy2VQST1ltzAGit9RiM1Gv4vuna74WsFzrn1U
+A/TbaR0ih/qG
+-----END CERTIFICATE-----
diff --git a/internal/testdata/cert_test.go b/internal/testdata/cert_test.go
new file mode 100644
index 00000000..e21fb61d
--- /dev/null
+++ b/internal/testdata/cert_test.go
@@ -0,0 +1,30 @@
+package testdata
+
+import (
+	"crypto/tls"
+	"io"
+	. "github.com/onsi/ginkgo"
+	.
"github.com/onsi/gomega" +) + +var _ = Describe("certificates", func() { + It("returns certificates", func() { + ln, err := tls.Listen("tcp", "localhost:4433", GetTLSConfig()) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + conn, err := ln.Accept() + Expect(err).ToNot(HaveOccurred()) + defer conn.Close() + _, err = conn.Write([]byte("foobar")) + Expect(err).ToNot(HaveOccurred()) + }() + + conn, err := tls.Dial("tcp", "localhost:4433", &tls.Config{RootCAs: GetRootCA()}) + Expect(err).ToNot(HaveOccurred()) + data, err := io.ReadAll(conn) + Expect(err).ToNot(HaveOccurred()) + Expect(string(data)).To(Equal("foobar")) + }) +}) diff --git a/internal/testdata/generate_key.sh b/internal/testdata/generate_key.sh new file mode 100755 index 00000000..7ecaa966 --- /dev/null +++ b/internal/testdata/generate_key.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +echo "Generating CA key and certificate:" +openssl req -x509 -sha256 -nodes -days 3650 -newkey rsa:2048 \ + -keyout ca.key -out ca.pem \ + -subj "/O=quic-go Certificate Authority/" + +echo "Generating CSR" +openssl req -out cert.csr -new -newkey rsa:2048 -nodes -keyout priv.key \ + -subj "/O=quic-go/" + +echo "Sign certificate:" +openssl x509 -req -sha256 -days 3650 -in cert.csr -out cert.pem \ + -CA ca.pem -CAkey ca.key -CAcreateserial \ + -extfile <(printf "subjectAltName=DNS:localhost") + +# debug output the certificate +openssl x509 -noout -text -in cert.pem + +# we don't need the CA key, the serial number and the CSR any more +rm ca.key cert.csr ca.srl + diff --git a/internal/testdata/priv.key b/internal/testdata/priv.key new file mode 100644 index 00000000..56b8d894 --- /dev/null +++ b/internal/testdata/priv.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDf2MK4oEl3SQi/ +23QRoW9FKarVPB/pK1fO9PsNcgkspCyJh/Md6h2RF7fbK/74I3CaFemdeUWZp2xK +pwMfOLCApSAaCIJ5NFT8blhfv1P3RJW37Qn2HHyQHXYUXJHZhgfsm91qIG99lCfq +t5svvoejYx7GG/jsKST50Jnl4zhOypH/m13xcsBxd0fJM1XYeSVBWHim8WJmIMUa +bNWE7Gwq2Qq0stQm0PRScFoOQvLfu8r1RZjnljxAsqQQs9sUtprvpMnDDSIyFjJd +Ei+6FTOXAZESj3lSBvynDIufv+JUUi6D4dE8YdkygVn6cy2yDnT1PPzu3ALpCBls +dF7Hx1EDAgMBAAECggEBAMm+mLDBdbUWk9YmuZNyRdC13wvT5obF05vo26OglXgw +dxt09b6OVBuCnuff3SpS9pdJDIYq2HnFlSorH/sxopIvQKF17fHDIp1n7ipNTCXd +IHrmHkY8Il/YzaVIUQMVc2rih0mw9greTqOS20DKnYC6QvAWIeDmrDaitTGl+ge3 +hm7e2lsgZi13R6fTNwQs9geEQSGzP2k7bFceHQFDChOYiQraR5+VZZ8S8AMGjk47 +AUa5EsKeUe6O9t2xuDSFxzYz5eadOAiErKGDos5KXXr3VQgFcC8uPEFFjcJ/yl+8 +tOe4iLeVwGSDJhTAThdR2deJOjaDcarWM7ixmxA3DAECgYEA/WVwmY4gWKwv49IJ +Jnh1Gu93P772GqliMNpukdjTI+joQxfl4jRSt2hk4b1KRwyT9aaKfvdz0HFlXo/r +9NVSAYT3/3vbcw61bfvPhhtz44qRAAKua6b5cUM6XqxVt1hqdP8lrf/blvA5ln+u +O51S8+wpxZMuqKz/29zdWSG6tAMCgYEA4iWXMXX9dZajI6abVkWwuosvOakXdLk4 +tUy7zd+JPF7hmUzzj2gtg4hXoiQPAOi+GY3TX+1Nza3s1LD7iWaXSKeOWvvligw9 +Q/wVTNW2P1+tdhScJf9QudzW69xOm5HNBgx9uWV2cHfjC12vg5aTH0k5axvaq15H +9WBXlH5q3wECgYBYoYGYBDFmMpvxmMagkSOMz1OrlVSpkLOKmOxx0SBRACc1SIec +7mY8RqR6nOX9IfYixyTMMittLiyhvb9vfKnZZDQGRcFFZlCpbplws+t+HDqJgWaW +uumm5zfkY2z7204pLBF24fZhvha2gGRl76pTLTiTJd79Gr3HnmJByd1vFwKBgHL7 +vfYuEeM55lT4Hz8sTAFtR2O/7+cvTgAQteSlZbfGXlp939DonUulhTkxsFc7/3wq +unCpzcdoSWSTYDGqcf1FBIKKVVltg7EPeR0KBJIQabgCHqrLOBZojPZ7m5RJ+765 +lysuxZvFuTFMPzNe2gssRf+JuBMt6tR+WclsxZYBAoGAEEFs1ppDil1xlP5rdH7T +d3TSw/u4eU/X8Ei1zi25hdRUiV76fP9fBELYFmSrPBhugYv91vtSv/LmD4zLfLv/ +yzwAD9j1lGbgM8Of8klCkk+XSJ88ryUwnMTJ5loQJW8t4L+zLv5Le7Ca9SAT0kJ1 +jT0GzDymgLMGp8RPdBkpk+w= +-----END PRIVATE KEY----- diff --git a/internal/testdata/testdata_suite_test.go 
b/internal/testdata/testdata_suite_test.go new file mode 100644 index 00000000..4e9011cf --- /dev/null +++ b/internal/testdata/testdata_suite_test.go @@ -0,0 +1,13 @@ +package testdata + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestTestdata(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Testdata Suite") +} diff --git a/internal/tests/assert.go b/internal/tests/assert.go new file mode 100644 index 00000000..29adad29 --- /dev/null +++ b/internal/tests/assert.go @@ -0,0 +1,111 @@ +package tests + +import ( + "go/token" + "reflect" + "strings" + "testing" + "unsafe" +) + +func AssertIsNil(t *testing.T, v interface{}) { + if !isNil(v) { + t.Errorf("[%v] was expected to be nil", v) + } +} + +func AssertAllNotNil(t *testing.T, vv ...interface{}) { + for _, v := range vv { + AssertNotNil(t, v) + } +} + +func AssertNotNil(t *testing.T, v interface{}) { + if isNil(v) { + t.Fatalf("[%v] was expected to be non-nil", v) + } +} + +func AssertEqual(t *testing.T, e, g interface{}) { + if !equal(e, g) { + t.Errorf("Expected [%+v], got [%+v]", e, g) + } + return +} + +func AssertNoError(t *testing.T, err error) { + if err != nil { + t.Errorf("Error occurred [%v]", err) + } +} + +func AssertErrorContains(t *testing.T, err error, s string) { + if err == nil { + t.Error("err is nil") + return + } + if !strings.Contains(err.Error(), s) { + t.Errorf("%q is not included in error %q", s, err.Error()) + } +} + +func AssertContains(t *testing.T, s, substr string, shouldContain bool) { + s = strings.ToLower(s) + isContain := strings.Contains(s, substr) + if shouldContain { + if !isContain { + t.Errorf("%q is not included in %s", substr, s) + } + } else { + if isContain { + t.Errorf("%q is included in %q", substr, s) + } + } +} + +func AssertClone(t *testing.T, e, g interface{}) { + ev := reflect.ValueOf(e).Elem() + gv := reflect.ValueOf(g).Elem() + et := ev.Type() + + for i := 0; i < ev.NumField(); i++ { + sf := ev.Field(i) + st := et.Field(i) + + var ee, gg interface{} + if !token.IsExported(st.Name) { + ee = reflect.NewAt(sf.Type(), unsafe.Pointer(sf.UnsafeAddr())).Elem().Interface() + gg = reflect.NewAt(sf.Type(), unsafe.Pointer(gv.Field(i).UnsafeAddr())).Elem().Interface() + } else { + ee = sf.Interface() + gg = gv.Field(i).Interface() + } + if sf.Kind() == reflect.Func || sf.Kind() == reflect.Slice || sf.Kind() == reflect.Ptr { + if ee != nil { + if gg == nil { + t.Errorf("Field %s.%s is nil", et.Name(), et.Field(i).Name) + } + } + continue + } + if !reflect.DeepEqual(ee, gg) { + t.Errorf("Field %s.%s is not equal, expected [%v], got [%v]", et.Name(), et.Field(i).Name, ee, gg) + } + } +} + +func equal(expected, got interface{}) bool { + return reflect.DeepEqual(expected, got) +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + kind := rv.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && rv.IsNil() { + return true + } + return false +} diff --git a/internal/tests/condition.go b/internal/tests/condition.go new file mode 100644 index 00000000..46816d79 --- /dev/null +++ b/internal/tests/condition.go @@ -0,0 +1,17 @@ +package tests + +import "time" + +// WaitCondition reports whether fn eventually returned true, +// checking immediately and then every checkEvery amount, +// until waitFor has elapsed, at which point it returns false. 
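The WaitCondition helper documented above polls a predicate until it returns true or the deadline passes. A small, hypothetical test showing the intended call pattern:

```go
package tests_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/imroc/req/v3/internal/tests"
)

func TestEventuallyReady(t *testing.T) {
	var ready atomic.Bool
	go func() {
		// Simulate a component that becomes ready after a short delay.
		time.Sleep(200 * time.Millisecond)
		ready.Store(true)
	}()

	// Check immediately, then every 50ms, for at most 2 seconds.
	if !tests.WaitCondition(2*time.Second, 50*time.Millisecond, ready.Load) {
		t.Fatal("condition was not met before the deadline")
	}
}
```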
+func WaitCondition(waitFor, checkEvery time.Duration, fn func() bool) bool { + deadline := time.Now().Add(waitFor) + for time.Now().Before(deadline) { + if fn() { + return true + } + time.Sleep(checkEvery) + } + return false +} diff --git a/internal/tests/file.go b/internal/tests/file.go new file mode 100644 index 00000000..fc7753a5 --- /dev/null +++ b/internal/tests/file.go @@ -0,0 +1,18 @@ +package tests + +import ( + "os" + "path/filepath" +) + +var testDataPath string + +func init() { + pwd, _ := os.Getwd() + testDataPath = filepath.Join(pwd, ".testdata") +} + +// GetTestFilePath return test file absolute path. +func GetTestFilePath(filename string) string { + return filepath.Join(testDataPath, filename) +} diff --git a/internal/tests/net.go b/internal/tests/net.go new file mode 100644 index 00000000..da3f7e05 --- /dev/null +++ b/internal/tests/net.go @@ -0,0 +1,17 @@ +package tests + +import ( + "net" + "testing" +) + +func NewLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + ln, err = net.Listen("tcp6", "[::1]:0") + } + if err != nil { + t.Fatal(err) + } + return ln +} diff --git a/internal/tests/reader.go b/internal/tests/reader.go new file mode 100644 index 00000000..251dd4b5 --- /dev/null +++ b/internal/tests/reader.go @@ -0,0 +1,10 @@ +package tests + +type NeverEnding byte + +func (b NeverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} diff --git a/internal/tests/transport.go b/internal/tests/transport.go new file mode 100644 index 00000000..ca8701d2 --- /dev/null +++ b/internal/tests/transport.go @@ -0,0 +1 @@ +package tests diff --git a/internal/transport/option.go b/internal/transport/option.go new file mode 100644 index 00000000..78c88c19 --- /dev/null +++ b/internal/transport/option.go @@ -0,0 +1,181 @@ +package transport + +import ( + "context" + "crypto/tls" + "net" + "net/http" + "net/url" + "time" + + "github.com/imroc/req/v3/internal/dump" +) + +// Options is transport's options. +type Options struct { + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // + // The proxy type is determined by the URL scheme. "http", + // "https", "socks5", and "socks5h" are supported. If the scheme is empty, + // "http" is assumed. + // "socks5" is treated the same as "socks5h". + // + // If the proxy URL contains a userinfo subcomponent, + // the proxy request will pass the username and password + // in a Proxy-Authorization header. + // + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // OnProxyConnectResponse is called when the Transport gets an HTTP response from + // a proxy for a CONNECT request. It's called before the check for a 200 OK response. + // If it returns an error, the request fails with that error. + OnProxyConnectResponse func(ctx context.Context, proxyURL *url.URL, connectReq *http.Request, connectRes *http.Response) error + + // DialContext specifies the dial function for creating unencrypted TCP connections. + // If DialContext is nil, then the transport dials using package net. + // + // DialContext runs concurrently with calls to RoundTrip. + // A RoundTrip call that initiates a dial may end up using + // a connection dialed previously when the earlier connection + // becomes idle before the later DialContext completes. 
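The remaining fields of Options follow. As a rough sketch of how the connection-establishment knobs compose (illustrative values only; the internal transport package is only importable from inside this module):

```go
package main

import (
	"net"
	"net/http"
	"net/url"
	"time"

	"github.com/imroc/req/v3/internal/transport"
)

func main() {
	// Hypothetical local proxy address, used only for illustration.
	proxyURL, _ := url.Parse("http://127.0.0.1:8080")

	opt := transport.Options{
		// Route every request through the fixed proxy.
		Proxy: http.ProxyURL(proxyURL),
		// Dial plain TCP connections with explicit connect/keep-alive settings.
		DialContext: (&net.Dialer{
			Timeout:   5 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
	}
	_ = opt
}
```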
+ DialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // DialTLSContext specifies an optional dial function for creating + // TLS connections for non-proxied HTTPS requests. + // + // If DialTLSContext is nil, DialContext and TLSClientConfig are used. + // + // If DialTLSContext is set, the Dial and DialContext hooks are not used for HTTPS + // requests and the TLSClientConfig and TLSHandshakeTimeout + // are ignored. The returned net.Conn is assumed to already be + // past the TLS handshake. + DialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // TLSHandshakeContext specifies an optional dial function for tls handshake, + // it works even if a proxy is set, can be used to customize the tls fingerprint. + TLSHandshakeContext func(ctx context.Context, addr string, plainConn net.Conn) (conn net.Conn, tlsState *tls.ConnectionState, err error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. + // If nil, the default configuration is used. + // If non-nil, HTTP/2 support may not be enabled by default. + TLSClientConfig *tls.Config + + // TLSHandshakeTimeout specifies the maximum amount of time to + // wait for a TLS handshake. Zero means no timeout. + TLSHandshakeTimeout time.Duration + + // DisableKeepAlives, if true, disables HTTP keep-alives and + // will only use the connection to the server for a single + // HTTP request. + // + // This is unrelated to the similarly named TCP keep-alives. + DisableKeepAlives bool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AutoDecompression, if true, enables automatic decompression of + // compressed responses. It is equivalent to setting the Accept-Encoding + // header to "gzip, deflate, br, zstd" and the Transport will handle the + // decompression of the response transparently, returning the uncompressed. + AutoDecompression bool + + // EnableH2C, if true, enables http2 over plain http without tls. + EnableH2C bool + + // MaxIdleConns controls the maximum number of idle (keep-alive) + // connections across all hosts. Zero means no limit. + MaxIdleConns int + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + // (keep-alive) connections to keep per-host. If zero, + // defaultMaxIdleConnsPerHost is used. + MaxIdleConnsPerHost int + + // MaxConnsPerHost optionally limits the total number of + // connections per host, including connections in the dialing, + // active, and idle states. On limit violation, dials will block. + // + // Zero means no limit. + MaxConnsPerHost int + + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + + // ResponseHeaderTimeout, if non-zero, specifies the amount of + // time to wait for a server's response headers after fully + // writing the request (including its body, if any). This + // time does not include the time to read the response body. 
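Continuing the sketch above with the timeout and pooling fields documented in this struct (the values are arbitrary illustrations, not recommendations):

```go
opt := transport.Options{
	TLSHandshakeTimeout:   10 * time.Second, // cap on the TLS handshake itself
	ResponseHeaderTimeout: 15 * time.Second, // request fully written until first response header byte
	ExpectContinueTimeout: 1 * time.Second,  // wait for "100 Continue" before sending the body
	IdleConnTimeout:       90 * time.Second, // recycle idle keep-alive connections
	MaxIdleConnsPerHost:   8,                // idle pool size per host
}
```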
+ ResponseHeaderTimeout time.Duration + + // ExpectContinueTimeout, if non-zero, specifies the amount of + // time to wait for a server's first response headers after fully + // writing the request headers if the request has an + // "Expect: 100-continue" header. Zero means no timeout and + // causes the body to be sent immediately, without + // waiting for the server to approve. + // This time does not include the time to send the request header. + ExpectContinueTimeout time.Duration + + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. + // To set the header dynamically, see GetProxyConnectHeader. + ProxyConnectHeader http.Header + + // GetProxyConnectHeader optionally specifies a func to return + // headers to send to proxyURL during a CONNECT request to the + // ip:port target. + // If it returns an error, the Transport's RoundTrip fails with + // that error. It can return (nil, nil) to not add headers. + // If GetProxyConnectHeader is non-nil, ProxyConnectHeader is + // ignored. + GetProxyConnectHeader func(ctx context.Context, proxyURL *url.URL, target string) (http.Header, error) + + // MaxResponseHeaderBytes specifies a limit on how many + // response bytes are allowed in the server's response + // header. + // + // Zero means to use a default limit. + MaxResponseHeaderBytes int64 + + // WriteBufferSize specifies the size of the write buffer used + // when writing to the transport. + // If zero, a default (currently 4KB) is used. + WriteBufferSize int + + // ReadBufferSize specifies the size of the read buffer used + // when reading from the transport. + // If zero, a default (currently 4KB) is used. + ReadBufferSize int + + // Debugf is the optional debug function. + Debugf func(format string, v ...interface{}) + + Dump *dump.Dumper +} + +func (o Options) Clone() Options { + oo := o + if o.TLSClientConfig != nil { + oo.TLSClientConfig = o.TLSClientConfig.Clone() + } + if o.Dump != nil { + oo.Dump = o.Dump.Clone() + go oo.Dump.Start() + } + return oo +} diff --git a/internal/util/util.go b/internal/util/util.go new file mode 100644 index 00000000..80b56058 --- /dev/null +++ b/internal/util/util.go @@ -0,0 +1,107 @@ +package util + +import ( + "bytes" + "encoding/base64" + "os" + "reflect" + "strings" +) + +// IsJSONType method is to check JSON content type or not +func IsJSONType(ct string) bool { + return strings.Contains(ct, "json") +} + +// IsXMLType method is to check XML content type or not +func IsXMLType(ct string) bool { + return strings.Contains(ct, "xml") +} + +// GetPointer return the pointer of the interface. +func GetPointer(v interface{}) interface{} { + t := reflect.TypeOf(v) + if t.Kind() == reflect.Ptr { + if tt := t.Elem(); tt.Kind() == reflect.Ptr { // pointer of pointer + if tt.Elem().Kind() == reflect.Ptr { + panic("pointer of pointer of pointer is not supported") + } + el := reflect.ValueOf(v).Elem() + if el.IsZero() { + vv := reflect.New(tt.Elem()) + el.Set(vv) + return vv.Interface() + } else { + return el.Interface() + } + } else { + if reflect.ValueOf(v).IsZero() { + vv := reflect.New(t.Elem()) + return vv.Interface() + } + return v + } + } + return reflect.New(t).Interface() +} + +// GetType return the underlying type. +func GetType(v interface{}) reflect.Type { + return reflect.Indirect(reflect.ValueOf(v)).Type() +} + +// CutString slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. 
+// If sep does not appear in s, cut returns s, "", false. +func CutString(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +// CutBytes slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, nil, false. +// +// CutBytes returns slices of the original slice s, not copies. +func CutBytes(s, sep []byte) (before, after []byte, found bool) { + if i := bytes.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, nil, false +} + +// IsStringEmpty method tells whether given string is empty or not +func IsStringEmpty(str string) bool { + return len(strings.TrimSpace(str)) == 0 +} + +// See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt +// "To receive authorization, the client sends the userid and password, +// separated by a single colon (":") character, within a base64 +// encoded string in the credentials." +// It is not meant to be urlencoded. +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +// BasicAuthHeaderValue return the header of basic auth. +func BasicAuthHeaderValue(username, password string) string { + return "Basic " + basicAuth(username, password) +} + +// CreateDirectory create the directory. +func CreateDirectory(dir string) (err error) { + if _, err = os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return + } + } + } + return +} diff --git a/logger.go b/logger.go new file mode 100644 index 00000000..749ec6b0 --- /dev/null +++ b/logger.go @@ -0,0 +1,61 @@ +package req + +import ( + "io" + "log" + "os" +) + +// Logger is the abstract logging interface, gives control to +// the Req users, choice of the logger. +type Logger interface { + Errorf(format string, v ...interface{}) + Warnf(format string, v ...interface{}) + Debugf(format string, v ...interface{}) +} + +// NewLogger create a Logger wraps the *log.Logger +func NewLogger(output io.Writer, prefix string, flag int) Logger { + return &logger{l: log.New(output, prefix, flag)} +} + +func NewLoggerFromStandardLogger(l *log.Logger) Logger { + return &logger{l: l} +} + +func createDefaultLogger() Logger { + return NewLogger(os.Stdout, "", log.Ldate|log.Lmicroseconds) +} + +var _ Logger = (*logger)(nil) + +type disableLogger struct{} + +func (l *disableLogger) Errorf(format string, v ...interface{}) {} +func (l *disableLogger) Warnf(format string, v ...interface{}) {} +func (l *disableLogger) Debugf(format string, v ...interface{}) {} + +type logger struct { + l *log.Logger +} + +func (l *logger) Errorf(format string, v ...interface{}) { + l.output("ERROR", format, v...) +} + +func (l *logger) Warnf(format string, v ...interface{}) { + l.output("WARN", format, v...) +} + +func (l *logger) Debugf(format string, v ...interface{}) { + l.output("DEBUG", format, v...) +} + +func (l *logger) output(level, format string, v ...interface{}) { + format = level + " [req] " + format + if len(v) == 0 { + l.l.Print(format) + return + } + l.l.Printf(format, v...) 
+} diff --git a/logger_test.go b/logger_test.go new file mode 100644 index 00000000..8a456da8 --- /dev/null +++ b/logger_test.go @@ -0,0 +1,31 @@ +package req + +import ( + "bytes" + "log" + "testing" + + "github.com/imroc/req/v3/internal/tests" +) + +func TestLogger(t *testing.T) { + buf := new(bytes.Buffer) + l := NewLogger(buf, "", log.Ldate|log.Lmicroseconds) + c := tc().SetLogger(l) + c.SetProxyURL(":=\\<>ksfj&*&sf") + tests.AssertContains(t, buf.String(), "error", true) + buf.Reset() + c.R().SetOutput(nil) + tests.AssertContains(t, buf.String(), "warn", true) +} + +func TestLoggerFromStandardLogger(t *testing.T) { + buf := new(bytes.Buffer) + l := NewLoggerFromStandardLogger(log.New(buf, "", log.Ldate|log.Lmicroseconds)) + c := tc().SetLogger(l) + c.SetProxyURL(":=\\<>ksfj&*&sf") + tests.AssertContains(t, buf.String(), "error", true) + buf.Reset() + c.R().SetOutput(nil) + tests.AssertContains(t, buf.String(), "warn", true) +} diff --git a/middleware.go b/middleware.go new file mode 100644 index 00000000..df5d9d9f --- /dev/null +++ b/middleware.go @@ -0,0 +1,549 @@ +package req + +import ( + "bytes" + "errors" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/util" +) + +type ( + // RequestMiddleware type is for request middleware, called before a request is sent + RequestMiddleware func(client *Client, req *Request) error + + // ResponseMiddleware type is for response middleware, called after a response has been received + ResponseMiddleware func(client *Client, resp *Response) error +) + +func createMultipartHeader(file *FileUpload, contentType string) textproto.MIMEHeader { + hdr := make(textproto.MIMEHeader) + + contentDispositionValue := "form-data" + cd := new(ContentDisposition) + if file.ParamName != "" { + cd.Add("name", file.ParamName) + } + if file.FileName != "" { + cd.Add("filename", file.FileName) + } + if file.ExtraContentDisposition != nil { + for _, kv := range file.ExtraContentDisposition.kv { + cd.Add(kv.Key, kv.Value) + } + } + if c := cd.string(); c != "" { + contentDispositionValue += c + } + hdr.Set("Content-Disposition", contentDispositionValue) + + if !util.IsStringEmpty(contentType) { + hdr.Set(header.ContentType, contentType) + } + return hdr +} + +func closeq(v interface{}) { + if c, ok := v.(io.Closer); ok { + c.Close() + } +} + +func writeMultipartFormFile(w *multipart.Writer, file *FileUpload, r *Request) error { + content, err := file.GetFileContent() + if err != nil { + return err + } + defer content.Close() + if r.RetryAttempt > 0 { // reset file reader when retry a multipart file upload + if rs, ok := content.(io.ReadSeeker); ok { + _, err = rs.Seek(0, io.SeekStart) + if err != nil { + return err + } + } + } + // Auto detect actual multipart content type + cbuf := make([]byte, 512) + seeEOF := false + lastTime := time.Now() + size, err := content.Read(cbuf) + if err != nil { + if err == io.EOF { + seeEOF = true + } else { + return err + } + } + + ct := file.ContentType + if ct == "" { + ct = http.DetectContentType(cbuf) + } + pw, err := w.CreatePart(createMultipartHeader(file, ct)) + if err != nil { + return err + } + + if r.forceChunkedEncoding && r.uploadCallback != nil { + pw = &callbackWriter{ + Writer: pw, + lastTime: lastTime, + interval: r.uploadCallbackInterval, + totalSize: file.FileSize, + callback: func(written int64) { + r.uploadCallback(UploadInfo{ + ParamName: file.ParamName, + 
FileName: file.FileName, + FileSize: file.FileSize, + UploadedSize: written, + }) + }, + } + } + + if _, err = pw.Write(cbuf[:size]); err != nil { + return err + } + if seeEOF { + return nil + } + + _, err = io.Copy(pw, content) + return err +} + +func writeMultiPart(r *Request, w *multipart.Writer) { + defer w.Close() // close multipart to write tailer boundary + if len(r.FormData) > 0 { + for k, vs := range r.FormData { + for _, v := range vs { + w.WriteField(k, v) + } + } + } else if len(r.OrderedFormData) > 0 { + if len(r.OrderedFormData)%2 != 0 { + r.error = errBadOrderedFormData + return + } + maxIndex := len(r.OrderedFormData) - 2 + for i := 0; i <= maxIndex; i += 2 { + key := r.OrderedFormData[i] + value := r.OrderedFormData[i+1] + w.WriteField(key, value) + } + } + for _, file := range r.uploadFiles { + writeMultipartFormFile(w, file, r) + } +} + +func handleMultiPart(c *Client, r *Request) (err error) { + var b string + if c.multipartBoundaryFunc != nil { + b = c.multipartBoundaryFunc() + } + + if r.forceChunkedEncoding { + pr, pw := io.Pipe() + r.GetBody = func() (io.ReadCloser, error) { + return pr, nil + } + w := multipart.NewWriter(pw) + if len(b) > 0 { + w.SetBoundary(b) + } + r.SetContentType(w.FormDataContentType()) + go func() { + writeMultiPart(r, w) + pw.Close() // close pipe writer so that pipe reader could get EOF, and stop upload + }() + } else { + buf := new(bytes.Buffer) + w := multipart.NewWriter(buf) + if len(b) > 0 { + w.SetBoundary(b) + } + writeMultiPart(r, w) + r.GetBody = func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(buf.Bytes())), nil + } + r.Body = buf.Bytes() + r.SetContentType(w.FormDataContentType()) + } + return +} + +func handleFormData(r *Request) { + r.SetContentType(header.FormContentType) + r.SetBodyBytes([]byte(r.FormData.Encode())) +} + +var errBadOrderedFormData = errors.New("bad ordered form data, the number of key-value pairs should be an even number") + +func handleOrderedFormData(r *Request) { + r.SetContentType(header.FormContentType) + if len(r.OrderedFormData)%2 != 0 { + r.error = errBadOrderedFormData + return + } + maxIndex := len(r.OrderedFormData) - 2 + var buf strings.Builder + for i := 0; i <= maxIndex; i += 2 { + key := r.OrderedFormData[i] + value := r.OrderedFormData[i+1] + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(key)) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(value)) + } + r.SetBodyString(buf.String()) +} + +func handleMarshalBody(c *Client, r *Request) error { + ct := "" + if r.Headers != nil { + ct = r.Headers.Get(header.ContentType) + } + if ct == "" { + ct = c.Headers.Get(header.ContentType) + } + if ct != "" { + if util.IsXMLType(ct) { + body, err := c.xmlMarshal(r.marshalBody) + if err != nil { + return err + } + r.SetBodyBytes(body) + } else { + body, err := c.jsonMarshal(r.marshalBody) + if err != nil { + return err + } + r.SetBodyBytes(body) + } + return nil + } + body, err := c.jsonMarshal(r.marshalBody) + if err != nil { + return err + } + r.SetBodyJsonBytes(body) + return nil +} + +func parseRequestBody(c *Client, r *Request) (err error) { + if c.isPayloadForbid(r.Method) { + r.marshalBody = nil + r.Body = nil + r.GetBody = nil + return + } + // handle multipart + if r.isMultiPart { + return handleMultiPart(c, r) + } + + // handle form data + if len(c.FormData) > 0 { + r.SetFormDataFromValues(c.FormData) + } + + if len(r.FormData) > 0 { + handleFormData(r) + return + } else if len(r.OrderedFormData) > 0 { + handleOrderedFormData(r) + return + 
} + + // handle marshal body + if r.marshalBody != nil { + err = handleMarshalBody(c, r) + if err != nil { + return + } + } + + if r.Body == nil { + return + } + // body is in-memory []byte, so we can guess content type + + if c.Headers != nil && c.Headers.Get(header.ContentType) != "" { // ignore if content type set at client-level + return + } + if r.getHeader(header.ContentType) != "" { // ignore if content-type set at request-level + return + } + r.SetContentType(http.DetectContentType(r.Body)) + return +} + +func unmarshalBody(c *Client, r *Response, v interface{}) (err error) { + body, err := r.ToBytes() // in case req.SetResult or req.SetError with cient.DisalbeAutoReadResponse(true) + if err != nil { + return + } + ct := r.GetContentType() + if util.IsJSONType(ct) { + return c.jsonUnmarshal(body, v) + } else if util.IsXMLType(ct) { + return c.xmlUnmarshal(body, v) + } else { + if c.DebugLog { + c.log.Debugf("cannot determine the unmarshal function with %q Content-Type, default to json", ct) + } + return c.jsonUnmarshal(body, v) + } +} + +func defaultResultStateChecker(resp *Response) ResultState { + if code := resp.StatusCode; code > 199 && code < 300 { + return SuccessState + } else if code > 399 { + return ErrorState + } else { + return UnknownState + } +} + +func parseResponseBody(c *Client, r *Response) (err error) { + if r.Response == nil { + return + } + req := r.Request + switch r.ResultState() { + case SuccessState: + if req.Result != nil && r.StatusCode != http.StatusNoContent { + err = unmarshalBody(c, r, r.Request.Result) + if err == nil { + r.result = r.Request.Result + } + } + case ErrorState: + if r.StatusCode == http.StatusNoContent { + return + } + if req.Error != nil { + err = unmarshalBody(c, r, req.Error) + if err == nil { + r.error = req.Error + } + } else if c.commonErrorType != nil { + e := reflect.New(c.commonErrorType).Interface() + err = unmarshalBody(c, r, e) + if err == nil { + r.error = e + } + } + } + return +} + +type callbackWriter struct { + io.Writer + written int64 + totalSize int64 + lastTime time.Time + interval time.Duration + callback func(written int64) +} + +func (w *callbackWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + if n <= 0 { + return + } + w.written += int64(n) + if w.written == w.totalSize { + w.callback(w.written) + } else if now := time.Now(); now.Sub(w.lastTime) >= w.interval { + w.lastTime = now + w.callback(w.written) + } + return +} + +type callbackReader struct { + io.ReadCloser + read int64 + lastRead int64 + callback func(read int64) + lastTime time.Time + interval time.Duration +} + +func (r *callbackReader) Read(p []byte) (n int, err error) { + n, err = r.ReadCloser.Read(p) + if n <= 0 { + if err == io.EOF && r.read > r.lastRead { + r.callback(r.read) + r.lastRead = r.read + } + return + } + r.read += int64(n) + if err == io.EOF { + r.callback(r.read) + r.lastRead = r.read + } else if now := time.Now(); now.Sub(r.lastTime) >= r.interval { + r.lastTime = now + r.callback(r.read) + r.lastRead = r.read + } + return +} + +func handleDownload(c *Client, r *Response) (err error) { + if r.Response == nil || !r.Request.isSaveResponse { + return nil + } + var body io.ReadCloser + + if r.body != nil { // already read + body = io.NopCloser(bytes.NewReader(r.body)) + } else { + body = r.Body + } + + var output io.Writer + if r.Request.outputFile != "" { + file := r.Request.outputFile + if c.outputDirectory != "" && !filepath.IsAbs(file) { + file = c.outputDirectory + string(filepath.Separator) + file + } + 
+ file = filepath.Clean(file) + + if err = util.CreateDirectory(filepath.Dir(file)); err != nil { + return err + } + output, err = os.Create(file) + if err != nil { + return + } + } else { + output = r.Request.output // must not nil + } + + defer func() { + body.Close() + closeq(output) + }() + + _, err = io.Copy(output, body) + r.setReceivedAt() + return +} + +// generate URL +func parseRequestURL(c *Client, r *Request) error { + tempURL := r.RawURL + if len(r.PathParams) > 0 { + for p, v := range r.PathParams { + tempURL = strings.Replace(tempURL, "{"+p+"}", url.PathEscape(v), -1) + } + } + if len(c.PathParams) > 0 { + for p, v := range c.PathParams { + tempURL = strings.Replace(tempURL, "{"+p+"}", url.PathEscape(v), -1) + } + } + + // Parsing request URL + reqURL, err := url.Parse(tempURL) + if err != nil { + return err + } + + if reqURL.Scheme == "" && len(c.scheme) > 0 { // set scheme if missing + reqURL, err = url.Parse(c.scheme + "://" + tempURL) + if err != nil { + return err + } + } + + // If RawURL is relative path then added c.BaseURL into + // the request URL otherwise Request.URL will be used as-is + if !reqURL.IsAbs() { + tempURL = reqURL.String() + if len(tempURL) > 0 && tempURL[0] != '/' { + tempURL = "/" + tempURL + } + + reqURL, err = url.Parse(c.BaseURL + tempURL) + if err != nil { + return err + } + } + + // Adding Query Param + query := make(url.Values) + for k, v := range c.QueryParams { + for _, iv := range v { + query.Add(k, iv) + } + } + + for k, v := range r.QueryParams { + // remove query param from client level by key + // since overrides happens for that key in the request + query.Del(k) + + for _, iv := range v { + query.Add(k, iv) + } + } + + // Preserve query string order partially. + // Since not feasible in `SetQuery*` resty methods, because + // standard package `url.Encode(...)` sorts the query params + // alphabetically + if len(query) > 0 { + if util.IsStringEmpty(reqURL.RawQuery) { + reqURL.RawQuery = query.Encode() + } else { + reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode() + } + } + + reqURL.Host = removeEmptyPort(reqURL.Host) + r.URL = reqURL + return nil +} + +func parseRequestHeader(c *Client, r *Request) error { + if c.Headers == nil { + return nil + } + if r.Headers == nil { + r.Headers = make(http.Header) + } + for k, vs := range c.Headers { + if len(r.Headers[k]) == 0 { + r.Headers[k] = vs + } + } + return nil +} + +func parseRequestCookie(c *Client, r *Request) error { + if len(c.Cookies) > 0 || r.RetryAttempt <= 0 { + r.Cookies = append(r.Cookies, c.Cookies...) 
+ } + + return nil +} diff --git a/parallel_download.go b/parallel_download.go new file mode 100644 index 00000000..cdff7288 --- /dev/null +++ b/parallel_download.go @@ -0,0 +1,301 @@ +package req + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "io" + "math" + urlpkg "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +type ParallelDownload struct { + url string + client *Client + concurrency int + output io.Writer + filename string + segmentSize int64 + perm os.FileMode + tempRootDir string + tempDir string + taskCh chan *downloadTask + doneCh chan struct{} + wgDoneCh chan struct{} + errCh chan error + wg sync.WaitGroup + taskMap map[int]*downloadTask + taskNotifyCh chan *downloadTask + mu sync.Mutex + lastIndex int +} + +func (pd *ParallelDownload) completeTask(task *downloadTask) { + pd.mu.Lock() + pd.taskMap[task.index] = task + pd.mu.Unlock() + go func() { + select { + case pd.taskNotifyCh <- task: + case <-pd.doneCh: + } + }() +} + +func (pd *ParallelDownload) popTask(index int) *downloadTask { + pd.mu.Lock() + if task, ok := pd.taskMap[index]; ok { + delete(pd.taskMap, index) + pd.mu.Unlock() + return task + } + pd.mu.Unlock() + for { + task := <-pd.taskNotifyCh + if task.index == index { + pd.mu.Lock() + delete(pd.taskMap, index) + pd.mu.Unlock() + return task + } + } +} + +func md5Sum(s string) string { + sum := md5.Sum([]byte(s)) + return hex.EncodeToString(sum[:]) +} + +func (pd *ParallelDownload) ensure() error { + if pd.concurrency <= 0 { + pd.concurrency = 5 + } + if pd.segmentSize <= 0 { + pd.segmentSize = 1073741824 // 10MB + } + if pd.perm == 0 { + pd.perm = 0777 + } + if pd.tempRootDir == "" { + pd.tempRootDir = os.TempDir() + } + pd.tempDir = filepath.Join(pd.tempRootDir, md5Sum(pd.url)) + if pd.client.DebugLog { + pd.client.log.Debugf("use temporary directory %s", pd.tempDir) + pd.client.log.Debugf("download with %d concurrency and %d bytes segment size", pd.concurrency, pd.segmentSize) + } + err := os.MkdirAll(pd.tempDir, os.ModePerm) + if err != nil { + return err + } + + pd.taskCh = make(chan *downloadTask) + pd.doneCh = make(chan struct{}) + pd.wgDoneCh = make(chan struct{}) + pd.errCh = make(chan error) + pd.taskMap = make(map[int]*downloadTask) + pd.taskNotifyCh = make(chan *downloadTask) + return nil +} + +func (pd *ParallelDownload) SetSegmentSize(segmentSize int64) *ParallelDownload { + pd.segmentSize = segmentSize + return pd +} + +func (pd *ParallelDownload) SetTempRootDir(tempRootDir string) *ParallelDownload { + pd.tempRootDir = tempRootDir + return pd +} + +func (pd *ParallelDownload) SetFileMode(perm os.FileMode) *ParallelDownload { + pd.perm = perm + return pd +} + +func (pd *ParallelDownload) SetConcurrency(concurrency int) *ParallelDownload { + pd.concurrency = concurrency + return pd +} + +func (pd *ParallelDownload) SetOutput(output io.Writer) *ParallelDownload { + if output != nil { + pd.output = output + } + return pd +} + +func (pd *ParallelDownload) SetOutputFile(filename string) *ParallelDownload { + pd.filename = filename + return pd +} + +func getRangeTempFile(rangeStart, rangeEnd int64, workerDir string) string { + return filepath.Join(workerDir, fmt.Sprintf("temp-%d-%d", rangeStart, rangeEnd)) +} + +type downloadTask struct { + index int + rangeStart, rangeEnd int64 + tempFilename string + tempFile *os.File +} + +func (pd *ParallelDownload) handleTask(t *downloadTask, ctx ...context.Context) { + pd.wg.Add(1) + defer pd.wg.Done() + t.tempFilename = getRangeTempFile(t.rangeStart, t.rangeEnd, pd.tempDir) + if 
pd.client.DebugLog { + pd.client.log.Debugf("downloading segment %d-%d", t.rangeStart, t.rangeEnd) + } + file, err := os.OpenFile(t.tempFilename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + pd.errCh <- err + return + } + err = pd.client.Get(pd.url). + SetHeader("Range", fmt.Sprintf("bytes=%d-%d", t.rangeStart, t.rangeEnd)). + SetOutput(file). + Do(ctx...).Err + + if err != nil { + pd.errCh <- err + return + } + t.tempFile = file + pd.completeTask(t) +} + +func (pd *ParallelDownload) startWorker(ctx ...context.Context) { + for { + select { + case t := <-pd.taskCh: + pd.handleTask(t, ctx...) + case <-pd.doneCh: + return + } + } +} + +func (pd *ParallelDownload) mergeFile() { + defer pd.wg.Done() + file, err := pd.getOutputFile() + if err != nil { + pd.errCh <- err + return + } + for i := 0; ; i++ { + task := pd.popTask(i) + tempFile, err := os.Open(task.tempFilename) + if err != nil { + pd.errCh <- err + return + } + _, err = io.Copy(file, tempFile) + tempFile.Close() + if err != nil { + pd.errCh <- err + return + } + if i < pd.lastIndex { + continue + } + break + } + if pd.client.DebugLog { + pd.client.log.Debugf("removing temporary directory %s", pd.tempDir) + } + err = os.RemoveAll(pd.tempDir) + if err != nil { + pd.errCh <- err + } +} + +func (pd *ParallelDownload) Do(ctx ...context.Context) error { + err := pd.ensure() + if err != nil { + return err + } + for i := 0; i < pd.concurrency; i++ { + go pd.startWorker(ctx...) + } + resp := pd.client.Head(pd.url).Do(ctx...) + if resp.Err != nil { + return resp.Err + } + if resp.ContentLength <= 0 { + return fmt.Errorf("bad content length: %d", resp.ContentLength) + } + pd.lastIndex = int(math.Ceil(float64(resp.ContentLength)/float64(pd.segmentSize))) - 1 + pd.wg.Add(1) + go pd.mergeFile() + go func() { + pd.wg.Wait() + close(pd.wgDoneCh) + }() + totalBytes := resp.ContentLength + start := int64(0) + for i := 0; ; i++ { + end := start + (pd.segmentSize - 1) + if end > (totalBytes - 1) { + end = totalBytes - 1 + } + task := &downloadTask{ + index: i, + rangeStart: start, + rangeEnd: end, + } + pd.taskCh <- task + if end < (totalBytes - 1) { + start = end + 1 + continue + } + break + } + select { + case <-pd.wgDoneCh: + if pd.client.DebugLog { + if pd.filename != "" { + pd.client.log.Debugf("download completed from %s to %s", pd.url, pd.filename) + } else { + pd.client.log.Debugf("download completed for %s", pd.url) + } + } + close(pd.doneCh) + case err := <-pd.errCh: + return err + } + return nil +} + +func (pd *ParallelDownload) getOutputFile() (io.Writer, error) { + outputFile := pd.output + if outputFile != nil { + return outputFile, nil + } + if pd.filename == "" { + u, err := urlpkg.Parse(pd.url) + if err != nil { + panic(err) + } + paths := strings.Split(u.Path, "/") + for i := len(paths) - 1; i > 0; i-- { + if paths[i] != "" { + pd.filename = paths[i] + break + } + } + if pd.filename == "" { + pd.filename = "download" + } + } + if pd.client.outputDirectory != "" && !filepath.IsAbs(pd.filename) { + pd.filename = filepath.Join(pd.client.outputDirectory, pd.filename) + } + return os.OpenFile(pd.filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, pd.perm) +} diff --git a/pkg/altsvc/altsvc.go b/pkg/altsvc/altsvc.go new file mode 100644 index 00000000..b464b05d --- /dev/null +++ b/pkg/altsvc/altsvc.go @@ -0,0 +1,60 @@ +package altsvc + +import ( + "sync" + "time" +) + +// AltSvcJar is default implementation of Jar, which stores +// AltSvc in memory. 
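Before the AltSvcJar definition below, a short usage sketch of the jar: store the parsed alternative service for an origin and read it back until it expires. The addresses and lifetime are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/imroc/req/v3/pkg/altsvc"
)

func main() {
	jar := altsvc.NewAltSvcJar()
	jar.SetAltSvc("example.com:443", &altsvc.AltSvc{
		Protocol: "h3",
		Host:     "", // empty means the same host as the original request
		Port:     "443",
		Expire:   time.Now().Add(24 * time.Hour),
	})
	if as := jar.GetAltSvc("example.com:443"); as != nil {
		fmt.Printf("switch to %s on port %s\n", as.Protocol, as.Port)
	}
}
```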
+type AltSvcJar struct {
+	entries map[string]*AltSvc
+	mu      sync.Mutex
+}
+
+// NewAltSvcJar creates an AltSvcJar which implements Jar.
+func NewAltSvcJar() *AltSvcJar {
+	return &AltSvcJar{
+		entries: make(map[string]*AltSvc),
+	}
+}
+
+func (j *AltSvcJar) GetAltSvc(addr string) *AltSvc {
+	if addr == "" {
+		return nil
+	}
+	// Lock before reading the map so lookups do not race with SetAltSvc.
+	j.mu.Lock()
+	defer j.mu.Unlock()
+	as, ok := j.entries[addr]
+	if !ok {
+		return nil
+	}
+	now := time.Now()
+	if as.Expire.Before(now) { // expired
+		delete(j.entries, addr)
+		return nil
+	}
+	return as
+}
+
+func (j *AltSvcJar) SetAltSvc(addr string, as *AltSvc) {
+	if addr == "" {
+		return
+	}
+	j.mu.Lock()
+	defer j.mu.Unlock()
+	j.entries[addr] = as
+}
+
+// AltSvc is the parsed alt-svc.
+type AltSvc struct {
+	// Protocol is the alt-svc proto, e.g. h3.
+	Protocol string
+	// Host is the alt-svc's host, could be empty if
+	// it's the same host as the raw request.
+	Host string
+	// Port is the alt-svc's port.
+	Port string
+	// Expire is the time that the alt-svc should expire.
+	Expire time.Time
+}
diff --git a/pkg/altsvc/jar.go b/pkg/altsvc/jar.go
new file mode 100644
index 00000000..6264bc56
--- /dev/null
+++ b/pkg/altsvc/jar.go
@@ -0,0 +1,9 @@
+package altsvc
+
+// Jar is a container of AltSvc.
+type Jar interface {
+	// SetAltSvc stores the AltSvc.
+	SetAltSvc(addr string, as *AltSvc)
+	// GetAltSvc gets the AltSvc.
+	GetAltSvc(addr string) *AltSvc
+}
diff --git a/pkg/tls/conn.go b/pkg/tls/conn.go
new file mode 100644
index 00000000..428cd930
--- /dev/null
+++ b/pkg/tls/conn.go
@@ -0,0 +1,39 @@
+package tls
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+)
+
+// Conn is the recommended interface for the connection
+// returned by the DialTLS function (Client.SetDialTLS,
+// Transport.DialTLSContext), so that the TLS handshake negotiation
+// can automatically decide whether to use HTTP2 or HTTP1 (ALPN).
+// If this interface is not implemented, HTTP1 will be used by default.
+type Conn interface {
+	net.Conn
+	// ConnectionState returns basic TLS details about the connection.
+	ConnectionState() tls.ConnectionState
+	// Handshake runs the client or server handshake
+	// protocol if it has not yet been run.
+	//
+	// Most uses of this package need not call Handshake explicitly: the
+	// first Read or Write will call it automatically.
+	//
+	// For control over canceling or setting a timeout on a handshake, use
+	// HandshakeContext or the Dialer's DialContext method instead.
+	Handshake() error
+
+	// HandshakeContext runs the client or server handshake
+	// protocol if it has not yet been run.
+	//
+	// The provided Context must be non-nil. If the context is canceled before
+	// the handshake is complete, the handshake is interrupted and an error is returned.
+	// Once the handshake has completed, cancellation of the context will not affect the
+	// connection.
+	//
+	// Most uses of this package need not call HandshakeContext explicitly: the
+	// first Read or Write will call it automatically.
+	HandshakeContext(ctx context.Context) error
+}
diff --git a/redirect.go b/redirect.go
new file mode 100644
index 00000000..fcc13e4b
--- /dev/null
+++ b/redirect.go
@@ -0,0 +1,134 @@
+package req
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+)
+
+// RedirectPolicy represents the redirect policy for Client.
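The redirect policies declared below are meant to be combined. A hedged sketch, assuming the client's SetRedirectPolicy setter (defined elsewhere in the repository, outside this diff) accepts a variadic list of policies:

```go
package main

import "github.com/imroc/req/v3"

func main() {
	client := req.C().SetRedirectPolicy(
		req.MaxRedirectPolicy(5),                            // give up after 5 hops
		req.AllowedDomainRedirectPolicy("imroc.cc"),         // only follow redirects within this domain
		req.AlwaysCopyHeaderRedirectPolicy("Authorization"), // keep the auth header on every hop
	)
	_ = client
}
```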
+type RedirectPolicy func(req *http.Request, via []*http.Request) error + +// MaxRedirectPolicy specifies the max number of redirect +func MaxRedirectPolicy(noOfRedirect int) RedirectPolicy { + return func(req *http.Request, via []*http.Request) error { + if len(via) >= noOfRedirect { + return fmt.Errorf("stopped after %d redirects", noOfRedirect) + } + return nil + } +} + +// DefaultRedirectPolicy allows up to 10 redirects +func DefaultRedirectPolicy() RedirectPolicy { + return MaxRedirectPolicy(10) +} + +// NoRedirectPolicy disable redirect behaviour +func NoRedirectPolicy() RedirectPolicy { + return func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } +} + +// SameDomainRedirectPolicy allows redirect only if the redirected domain +// is the same as original domain, e.g. redirect to "www.imroc.cc" from +// "imroc.cc" is allowed, but redirect to "google.com" is not allowed. +func SameDomainRedirectPolicy() RedirectPolicy { + return func(req *http.Request, via []*http.Request) error { + if getDomain(req.URL.Host) != getDomain(via[0].URL.Host) { + return errors.New("different domain name is not allowed") + } + return nil + } +} + +// SameHostRedirectPolicy allows redirect only if the redirected host +// is the same as original host, e.g. redirect to "www.imroc.cc" from +// "imroc.cc" is not the allowed. +func SameHostRedirectPolicy() RedirectPolicy { + return func(req *http.Request, via []*http.Request) error { + if getHostname(req.URL.Host) != getHostname(via[0].URL.Host) { + return errors.New("different host name is not allowed") + } + return nil + } +} + +// AllowedHostRedirectPolicy allows redirect only if the redirected host +// match one of the host that specified. +func AllowedHostRedirectPolicy(hosts ...string) RedirectPolicy { + m := make(map[string]struct{}) + for _, h := range hosts { + m[strings.ToLower(getHostname(h))] = struct{}{} + } + + return func(req *http.Request, via []*http.Request) error { + h := getHostname(req.URL.Host) + if _, ok := m[h]; !ok { + return fmt.Errorf("redirect host [%s] is not allowed", h) + } + return nil + } +} + +// AllowedDomainRedirectPolicy allows redirect only if the redirected domain +// match one of the domain that specified. +func AllowedDomainRedirectPolicy(hosts ...string) RedirectPolicy { + domains := make(map[string]struct{}) + for _, h := range hosts { + domains[strings.ToLower(getDomain(h))] = struct{}{} + } + + return func(req *http.Request, via []*http.Request) error { + domain := getDomain(req.URL.Host) + if _, ok := domains[domain]; !ok { + return fmt.Errorf("redirect domain [%s] is not allowed", domain) + } + return nil + } +} + +func getHostname(host string) (hostname string) { + if strings.Index(host, ":") > 0 { + host, _, _ = net.SplitHostPort(host) + } + hostname = strings.ToLower(host) + return +} + +func getDomain(host string) string { + host = getHostname(host) + ss := strings.Split(host, ".") + if len(ss) < 3 { + return host + } + ss = ss[1:] + return strings.Join(ss, ".") +} + +// AlwaysCopyHeaderRedirectPolicy ensures that the given sensitive headers will +// always be copied on redirect. +// By default, golang will copy all of the original request's headers on redirect, +// unless they're sensitive, like "Authorization" or "Www-Authenticate". 
Only send +// sensitive ones to the same origin, or subdomains thereof (https://go-review.googlesource.com/c/go/+/28930/) +// Check discussion: https://github.com/golang/go/issues/4800 +// For example: +// +// client.SetRedirectPolicy(req.AlwaysCopyHeaderRedirectPolicy("Authorization")) +func AlwaysCopyHeaderRedirectPolicy(headers ...string) RedirectPolicy { + return func(req *http.Request, via []*http.Request) error { + for _, header := range headers { + if len(req.Header.Values(header)) > 0 { + continue + } + vals := via[0].Header.Values(header) + for _, val := range vals { + req.Header.Add(header, val) + } + } + return nil + } +} diff --git a/req.go b/req.go index f8a9ae73..84a052d7 100644 --- a/req.go +++ b/req.go @@ -2,660 +2,127 @@ package req import ( "bytes" - "compress/gzip" - "encoding/json" - "encoding/xml" - "errors" "fmt" - "io" - "io/ioutil" - "mime/multipart" "net/http" - "net/textproto" "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" ) -// default *Req -var std = New() - -// flags to decide which part can be outputed -const ( - LreqHead = 1 << iota // output request head (request line and request header) - LreqBody // output request body - LrespHead // output response head (response line and response header) - LrespBody // output response body - Lcost // output time costed by the request - LstdFlags = LreqHead | LreqBody | LrespHead | LrespBody -) - -// Header represents http request header -type Header map[string]string - -func (h Header) Clone() Header { - if h == nil { - return nil - } - hh := Header{} - for k, v := range h { - hh[k] = v - } - return hh -} - -// Param represents http request param -type Param map[string]interface{} - -// QueryParam is used to force append http request param to the uri -type QueryParam map[string]interface{} - -// Host is used for set request's Host -type Host string - -// FileUpload represents a file to upload -type FileUpload struct { - // filename in multipart form. - FileName string - // form field name - FieldName string - // file to uplaod, required - File io.ReadCloser -} - -type DownloadProgress func(current, total int64) - -type UploadProgress func(current, total int64) - -// File upload files matching the name pattern such as -// /usr/*/bin/go* (assuming the Separator is '/') -func File(patterns ...string) interface{} { - matches := []string{} - for _, pattern := range patterns { - m, err := filepath.Glob(pattern) - if err != nil { - return err - } - matches = append(matches, m...) 
- } - if len(matches) == 0 { - return errors.New("req: no file have been matched") - } - uploads := []FileUpload{} - for _, match := range matches { - if s, e := os.Stat(match); e != nil || s.IsDir() { - continue - } - file, _ := os.Open(match) - uploads = append(uploads, FileUpload{ - File: file, - FileName: filepath.Base(match), - FieldName: "media", - }) - } - - return uploads -} - -type bodyJson struct { - v interface{} -} - -type bodyXml struct { - v interface{} -} - -// BodyJSON make the object be encoded in json format and set it to the request body -func BodyJSON(v interface{}) *bodyJson { - return &bodyJson{v: v} -} - -// BodyXML make the object be encoded in xml format and set it to the request body -func BodyXML(v interface{}) *bodyXml { - return &bodyXml{v: v} -} - -// Req is a convenient client for initiating requests -type Req struct { - client *http.Client - jsonEncOpts *jsonEncOpts - xmlEncOpts *xmlEncOpts - flag int -} - -// New create a new *Req -func New() *Req { - return &Req{flag: LstdFlags} +type kv struct { + Key string + Value string } -type param struct { - url.Values -} - -func (p *param) getValues() url.Values { - if p.Values == nil { - p.Values = make(url.Values) - } - return p.Values -} - -func (p *param) Copy(pp param) { - if pp.Values == nil { - return - } - vs := p.getValues() - for key, values := range pp.Values { - for _, value := range values { - vs.Add(key, value) - } - } -} -func (p *param) Adds(m map[string]interface{}) { - if len(m) == 0 { - return - } - vs := p.getValues() - for k, v := range m { - vs.Add(k, fmt.Sprint(v)) - } -} - -func (p *param) Empty() bool { - return p.Values == nil -} - -// Do execute a http request with sepecify method and url, -// and it can also have some optional params, depending on your needs. -func (r *Req) Do(method, rawurl string, vs ...interface{}) (resp *Resp, err error) { - if rawurl == "" { - return nil, errors.New("req: url not specified") - } - req := &http.Request{ - Method: method, - Header: make(http.Header), - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - } - resp = &Resp{req: req, r: r} - - var queryParam param - var formParam param - var uploads []FileUpload - var uploadProgress UploadProgress - var progress func(int64, int64) - var delayedFunc []func() - var lastFunc []func() - - for _, v := range vs { - switch vv := v.(type) { - case Header: - for key, value := range vv { - req.Header.Add(key, value) - } - case http.Header: - for key, values := range vv { - for _, value := range values { - req.Header.Add(key, value) - } - } - case *bodyJson: - fn, err := setBodyJson(req, resp, r.jsonEncOpts, vv.v) - if err != nil { - return nil, err - } - delayedFunc = append(delayedFunc, fn) - case *bodyXml: - fn, err := setBodyXml(req, resp, r.xmlEncOpts, vv.v) - if err != nil { - return nil, err - } - delayedFunc = append(delayedFunc, fn) - case Param: - if method == "GET" || method == "HEAD" { - queryParam.Adds(vv) - } else { - formParam.Adds(vv) - } - case QueryParam: - queryParam.Adds(vv) - case string: - setBodyBytes(req, resp, []byte(vv)) - case []byte: - setBodyBytes(req, resp, vv) - case bytes.Buffer: - setBodyBytes(req, resp, vv.Bytes()) - case *http.Client: - resp.client = vv - case FileUpload: - uploads = append(uploads, vv) - case []FileUpload: - uploads = append(uploads, vv...) 
- case *http.Cookie: - req.AddCookie(vv) - case Host: - req.Host = string(vv) - case io.Reader: - fn := setBodyReader(req, resp, vv) - lastFunc = append(lastFunc, fn) - case UploadProgress: - uploadProgress = vv - case DownloadProgress: - resp.downloadProgress = vv - case func(int64, int64): - progress = vv - case error: - return nil, vv - } - } - - if length := req.Header.Get("Content-Length"); length != "" { - if l, err := strconv.ParseInt(length, 10, 64); err == nil { - req.ContentLength = l - } - } - - if len(uploads) > 0 && (req.Method == "POST" || req.Method == "PUT") { // multipart - var up UploadProgress - if uploadProgress != nil { - up = uploadProgress - } else if progress != nil { - up = UploadProgress(progress) - } - multipartHelper := &multipartHelper{ - form: formParam.Values, - uploads: uploads, - uploadProgress: up, - } - multipartHelper.Upload(req) - resp.multipartHelper = multipartHelper - } else { - if progress != nil { - resp.downloadProgress = DownloadProgress(progress) - } - if !formParam.Empty() { - if req.Body != nil { - queryParam.Copy(formParam) - } else { - setBodyBytes(req, resp, []byte(formParam.Encode())) - setContentType(req, "application/x-www-form-urlencoded; charset=UTF-8") - } - } - } - - if !queryParam.Empty() { - paramStr := queryParam.Encode() - if strings.IndexByte(rawurl, '?') == -1 { - rawurl = rawurl + "?" + paramStr - } else { - rawurl = rawurl + "&" + paramStr - } - } - - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - req.URL = u - - if host := req.Header.Get("Host"); host != "" { - req.Host = host - } - - for _, fn := range delayedFunc { - fn() - } - - if resp.client == nil { - resp.client = r.Client() - } - - response, err := resp.client.Do(req) - if err != nil { - return nil, err - } - - for _, fn := range lastFunc { - fn() - } - - resp.resp = response - - if _, ok := resp.client.Transport.(*http.Transport); ok && response.Header.Get("Content-Encoding") == "gzip" && req.Header.Get("Accept-Encoding") != "" { - body, err := gzip.NewReader(response.Body) - if err != nil { - return nil, err - } - response.Body = body - } - - // output detail if Debug is enabled - if Debug { - fmt.Println(resp.Dump()) - } - return +// ContentDisposition represents parameters in `Content-Disposition` +// MIME header of multipart request. 
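+//
+// Extra parameters can be chained with Add, for example (the parameter
+// names and values below are illustrative):
+//
+//	cd := &ContentDisposition{}
+//	cd.Add("creation-date", "2022-02-10").Add("size", "1024")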
+type ContentDisposition struct { + kv []kv } -func setBodyBytes(req *http.Request, resp *Resp, data []byte) { - resp.reqBody = data - req.Body = ioutil.NopCloser(bytes.NewReader(data)) - req.ContentLength = int64(len(data)) +// Add adds a new key-value pair of Content-Disposition +func (c *ContentDisposition) Add(key, value string) *ContentDisposition { + c.kv = append(c.kv, kv{Key: key, Value: value}) + return c } -func setBodyJson(req *http.Request, resp *Resp, opts *jsonEncOpts, v interface{}) (func(), error) { - var data []byte - switch vv := v.(type) { - case string: - data = []byte(vv) - case []byte: - data = vv - case *bytes.Buffer: - data = vv.Bytes() - default: - if opts != nil { - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - enc.SetIndent(opts.indentPrefix, opts.indentValue) - enc.SetEscapeHTML(opts.escapeHTML) - err := enc.Encode(v) - if err != nil { - return nil, err - } - data = buf.Bytes() - } else { - var err error - data, err = json.Marshal(v) - if err != nil { - return nil, err - } - } +func (c *ContentDisposition) string() string { + if c == nil { + return "" } - setBodyBytes(req, resp, data) - delayedFunc := func() { - setContentType(req, "application/json; charset=UTF-8") + s := "" + for _, kv := range c.kv { + s += fmt.Sprintf("; %s=%q", kv.Key, kv.Value) } - return delayedFunc, nil + return s } -func setBodyXml(req *http.Request, resp *Resp, opts *xmlEncOpts, v interface{}) (func(), error) { - var data []byte - switch vv := v.(type) { - case string: - data = []byte(vv) - case []byte: - data = vv - case *bytes.Buffer: - data = vv.Bytes() - default: - if opts != nil { - var buf bytes.Buffer - enc := xml.NewEncoder(&buf) - enc.Indent(opts.prefix, opts.indent) - err := enc.Encode(v) - if err != nil { - return nil, err - } - data = buf.Bytes() - } else { - var err error - data, err = xml.Marshal(v) - if err != nil { - return nil, err - } - } - } - setBodyBytes(req, resp, data) - delayedFunc := func() { - setContentType(req, "application/xml; charset=UTF-8") - } - return delayedFunc, nil +// FileUpload represents a "form-data" multipart +type FileUpload struct { + // "name" parameter in `Content-Disposition` + ParamName string + // "filename" parameter in `Content-Disposition` + FileName string + // The file to be uploaded. + GetFileContent GetContentFunc + // Optional file length in bytes. + FileSize int64 + // Optional Content-Type + ContentType string + + // Optional extra ContentDisposition parameters. + // According to the HTTP specification, this should be nil, + // but some servers may not follow the specification and + // requires `Content-Disposition` parameters more than just + // "name" and "filename". + ExtraContentDisposition *ContentDisposition +} + +// UploadInfo is the information for each UploadCallback call. +type UploadInfo struct { + // parameter name in multipart upload + ParamName string + // filename in multipart upload + FileName string + // total file length in bytes. + FileSize int64 + // uploaded file length in bytes. + UploadedSize int64 } -func setContentType(req *http.Request, contentType string) { - if req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", contentType) - } -} +// UploadCallback is the callback which will be invoked during +// multipart upload. 
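+//
+// A typical callback only reports progress, for example (a sketch):
+//
+//	client.R().SetUploadCallback(func(info req.UploadInfo) {
+//		fmt.Printf("%q: %d/%d bytes uploaded\n", info.FileName, info.UploadedSize, info.FileSize)
+//	})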
+type UploadCallback func(info UploadInfo) -func setBodyReader(req *http.Request, resp *Resp, rd io.Reader) func() { - var rc io.ReadCloser - if trc, ok := rd.(io.ReadCloser); ok { - rc = trc - } else { - rc = ioutil.NopCloser(rd) - } - bw := &bodyWrapper{ - ReadCloser: rc, - limit: 102400, - } - req.Body = bw - lastFunc := func() { - resp.reqBody = bw.buf.Bytes() - } - return lastFunc +// DownloadInfo is the information for each DownloadCallback call. +type DownloadInfo struct { + // Response is the corresponding Response during download. + Response *Response + // downloaded body length in bytes. + DownloadedSize int64 } -type bodyWrapper struct { - io.ReadCloser - buf bytes.Buffer - limit int -} +// DownloadCallback is the callback which will be invoked during +// response body download. +type DownloadCallback func(info DownloadInfo) -func (b *bodyWrapper) Read(p []byte) (n int, err error) { - n, err = b.ReadCloser.Read(p) - if left := b.limit - b.buf.Len(); left > 0 && n > 0 { - if n <= left { - b.buf.Write(p[:n]) - } else { - b.buf.Write(p[:left]) - } +func cloneSlice[T any](s []T) []T { + if len(s) == 0 { + return nil } - return -} - -type multipartHelper struct { - form url.Values - uploads []FileUpload - dump []byte - uploadProgress UploadProgress -} - -func (m *multipartHelper) Upload(req *http.Request) { - pr, pw := io.Pipe() - bodyWriter := multipart.NewWriter(pw) - go func() { - for key, values := range m.form { - for _, value := range values { - bodyWriter.WriteField(key, value) - } - } - var upload func(io.Writer, io.Reader) error - if m.uploadProgress != nil { - var total int64 - for _, up := range m.uploads { - if file, ok := up.File.(*os.File); ok { - stat, err := file.Stat() - if err != nil { - continue - } - total += stat.Size() - } - } - var current int64 - buf := make([]byte, 1024) - var lastTime time.Time - upload = func(w io.Writer, r io.Reader) error { - for { - n, err := r.Read(buf) - if n > 0 { - _, _err := w.Write(buf[:n]) - if _err != nil { - return _err - } - current += int64(n) - if now := time.Now(); now.Sub(lastTime) > 200*time.Millisecond { - lastTime = now - m.uploadProgress(current, total) - } - } - if err == io.EOF { - return nil - } - if err != nil { - return err - } - } - } - } - - i := 0 - for _, up := range m.uploads { - if up.FieldName == "" { - i++ - up.FieldName = "file" + strconv.Itoa(i) - } - fileWriter, err := bodyWriter.CreateFormFile(up.FieldName, up.FileName) - if err != nil { - continue - } - //iocopy - if upload == nil { - io.Copy(fileWriter, up.File) - } else { - if _, ok := up.File.(*os.File); ok { - upload(fileWriter, up.File) - } else { - io.Copy(fileWriter, up.File) - } - } - up.File.Close() - } - bodyWriter.Close() - pw.Close() - }() - req.Header.Set("Content-Type", bodyWriter.FormDataContentType()) - req.Body = ioutil.NopCloser(pr) + ss := make([]T, len(s)) + copy(ss, s) + return ss } -func (m *multipartHelper) Dump() []byte { - if m.dump != nil { - return m.dump +func cloneUrlValues(v url.Values) url.Values { + if v == nil { + return nil } - var buf bytes.Buffer - bodyWriter := multipart.NewWriter(&buf) - for key, values := range m.form { + vv := make(url.Values) + for key, values := range v { for _, value := range values { - m.writeField(bodyWriter, key, value) + vv.Add(key, value) } } - for _, up := range m.uploads { - m.writeFile(bodyWriter, up.FieldName, up.FileName) - } - bodyWriter.Close() - m.dump = buf.Bytes() - return m.dump + return vv } -func (m *multipartHelper) writeField(w *multipart.Writer, fieldname, value string) 
error { - h := make(textproto.MIMEHeader) - h.Set("Content-Disposition", - fmt.Sprintf(`form-data; name="%s"`, fieldname)) - p, err := w.CreatePart(h) - if err != nil { - return err +func cloneMap(h map[string]string) map[string]string { + if h == nil { + return nil } - _, err = p.Write([]byte(value)) - return err -} - -func (m *multipartHelper) writeFile(w *multipart.Writer, fieldname, filename string) error { - h := make(textproto.MIMEHeader) - h.Set("Content-Disposition", - fmt.Sprintf(`form-data; name="%s"; filename="%s"`, - fieldname, filename)) - h.Set("Content-Type", "application/octet-stream") - p, err := w.CreatePart(h) - if err != nil { - return err + m := make(map[string]string) + for k, v := range h { + m[k] = v } - _, err = p.Write([]byte("******")) - return err -} - -// Get execute a http GET request -func (r *Req) Get(url string, v ...interface{}) (*Resp, error) { - return r.Do("GET", url, v...) -} - -// Post execute a http POST request -func (r *Req) Post(url string, v ...interface{}) (*Resp, error) { - return r.Do("POST", url, v...) -} - -// Put execute a http PUT request -func (r *Req) Put(url string, v ...interface{}) (*Resp, error) { - return r.Do("PUT", url, v...) -} - -// Patch execute a http PATCH request -func (r *Req) Patch(url string, v ...interface{}) (*Resp, error) { - return r.Do("PATCH", url, v...) -} - -// Delete execute a http DELETE request -func (r *Req) Delete(url string, v ...interface{}) (*Resp, error) { - return r.Do("DELETE", url, v...) -} - -// Head execute a http HEAD request -func (r *Req) Head(url string, v ...interface{}) (*Resp, error) { - return r.Do("HEAD", url, v...) + return m } -// Options execute a http OPTIONS request -func (r *Req) Options(url string, v ...interface{}) (*Resp, error) { - return r.Do("OPTIONS", url, v...) -} - -// Get execute a http GET request -func Get(url string, v ...interface{}) (*Resp, error) { - return std.Get(url, v...) -} - -// Post execute a http POST request -func Post(url string, v ...interface{}) (*Resp, error) { - return std.Post(url, v...) -} - -// Put execute a http PUT request -func Put(url string, v ...interface{}) (*Resp, error) { - return std.Put(url, v...) -} - -// Head execute a http HEAD request -func Head(url string, v ...interface{}) (*Resp, error) { - return std.Head(url, v...) -} - -// Options execute a http OPTIONS request -func Options(url string, v ...interface{}) (*Resp, error) { - return std.Options(url, v...) -} - -// Delete execute a http DELETE request -func Delete(url string, v ...interface{}) (*Resp, error) { - return std.Delete(url, v...) -} - -// Patch execute a http PATCH request -func Patch(url string, v ...interface{}) (*Resp, error) { - return std.Patch(url, v...) -} - -// Do execute request. -func Do(method, url string, v ...interface{}) (*Resp, error) { - return std.Do(method, url, v...) +// convertHeaderToString converts http header to a string. 
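+// For example, a header like {"Accept": ["*/*"]} is rendered in wire format
+// as "Accept: */*\r\n", one "Key: value" pair per line.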
+func convertHeaderToString(h http.Header) string { + if h == nil { + return "" + } + buf := new(bytes.Buffer) + h.Write(buf) + return buf.String() } diff --git a/req_test.go b/req_test.go index a863941a..e89f524f 100644 --- a/req_test.go +++ b/req_test.go @@ -1,313 +1,385 @@ package req import ( - "bytes" "encoding/json" "encoding/xml" - "io/ioutil" + "fmt" + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/tests" + "go/token" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/transform" + "io" "net/http" "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "strconv" "strings" + "sync" "testing" + "unsafe" ) -func TestUrlParam(t *testing.T) { - m := map[string]interface{}{ - "access_token": "123abc", - "name": "roc", - "enc": "涓枃", - } - queryHandler := func(w http.ResponseWriter, r *http.Request) { - query := r.URL.Query() - for key, value := range m { - if v := query.Get(key); value != v { - t.Errorf("query param %s = %s; want = %s", key, v, value) - } - } - } - ts := httptest.NewServer(http.HandlerFunc(queryHandler)) - _, err := Get(ts.URL, QueryParam(m)) - if err != nil { - t.Fatal(err) - } - _, err = Head(ts.URL, Param(m)) - if err != nil { - t.Fatal(err) - } - _, err = Put(ts.URL, QueryParam(m)) - if err != nil { - t.Fatal(err) - } +func tc() *Client { + return C(). + SetBaseURL(getTestServerURL()). + EnableInsecureSkipVerify() } -func TestFormParam(t *testing.T) { - formParam := Param{ - "access_token": "123abc", - "name": "roc", - "enc": "涓枃", - } - formHandler := func(w http.ResponseWriter, r *http.Request) { - r.ParseForm() - for key, value := range formParam { - if v := r.FormValue(key); value != v { - t.Errorf("form param %s = %s; want = %s", key, v, value) - } - } - } - ts := httptest.NewServer(http.HandlerFunc(formHandler)) - url := ts.URL - _, err := Post(url, formParam) - if err != nil { - t.Fatal(err) - } +var testDataPath string + +func init() { + pwd, _ := os.Getwd() + testDataPath = filepath.Join(pwd, ".testdata") } -func TestParamWithBody(t *testing.T) { - reqBody := "request body" - p := Param{ - "name": "roc", - "job": "programmer", - } - buf := bytes.NewBufferString(reqBody) - ts := newDefaultTestServer() - r, err := Post(ts.URL, p, buf) - if err != nil { - t.Fatal(err) - } - if r.Request().URL.Query().Get("name") != "roc" { - t.Error("param should in the url when set body manually") - } - if string(r.reqBody) != reqBody { - t.Error("request body not equal") - } +func createTestServer() *httptest.Server { + server := httptest.NewUnstartedServer(http.HandlerFunc(handleHTTP)) + server.EnableHTTP2 = true + server.StartTLS() + return server } -func TestParamBoth(t *testing.T) { - urlParam := QueryParam{ - "access_token": "123abc", - "enc": "涓枃", +func handleHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Method", r.Method) + switch r.Method { + case http.MethodGet: + handleGet(w, r) + case http.MethodPost: + handlePost(w, r) } - formParam := Param{ - "name": "roc", - "job": "杞欢宸ョ▼甯", - } - handler := func(w http.ResponseWriter, r *http.Request) { - query := r.URL.Query() - for key, value := range urlParam { - if v := query.Get(key); value != v { - t.Errorf("query param %s = %s; want = %s", key, v, value) - } +} + +var testServerMu sync.Mutex +var testServer *httptest.Server + +func getTestServerURL() string { + if testServer != nil { + return testServer.URL + } + testServerMu.Lock() + defer testServerMu.Unlock() + testServer = createTestServer() + return testServer.URL +} + +func 
getTestFileContent(t *testing.T, filename string) []byte { + b, err := os.ReadFile(tests.GetTestFilePath(filename)) + tests.AssertNoError(t, err) + return b +} + +func assertClone(t *testing.T, e, g interface{}) { + ev := reflect.ValueOf(e).Elem() + gv := reflect.ValueOf(g).Elem() + et := ev.Type() + + for i := 0; i < ev.NumField(); i++ { + sf := ev.Field(i) + st := et.Field(i) + + var ee, gg interface{} + if !token.IsExported(st.Name) { + ee = reflect.NewAt(sf.Type(), unsafe.Pointer(sf.UnsafeAddr())).Elem().Interface() + gg = reflect.NewAt(sf.Type(), unsafe.Pointer(gv.Field(i).UnsafeAddr())).Elem().Interface() + } else { + ee = sf.Interface() + gg = gv.Field(i).Interface() } - r.ParseForm() - for key, value := range formParam { - if v := r.FormValue(key); value != v { - t.Errorf("form param %s = %s; want = %s", key, v, value) + if sf.Kind() == reflect.Func || sf.Kind() == reflect.Slice || sf.Kind() == reflect.Ptr { + if ee != nil { + if gg == nil { + t.Errorf("Field %s.%s is nil", et.Name(), et.Field(i).Name) + } } + continue + } + if !reflect.DeepEqual(ee, gg) { + t.Errorf("Field %s.%s is not equal, expected [%v], got [%v]", et.Name(), et.Field(i).Name, ee, gg) } } - ts := httptest.NewServer(http.HandlerFunc(handler)) - url := ts.URL - _, err := Patch(url, urlParam, formParam) - if err != nil { - t.Fatal(err) - } +} +// Echo is used in "/echo" API. +type Echo struct { + Header http.Header `json:"header" xml:"header"` + Body string `json:"body" xml:"body"` } -func TestBody(t *testing.T) { - body := "request body" - handler := func(w http.ResponseWriter, r *http.Request) { - bs, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) +func handlePost(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/": + io.Copy(io.Discard, r.Body) + w.Write([]byte("TestPost: text response")) + case "/raw-upload": + io.Copy(io.Discard, r.Body) + case "/file-text": + r.ParseMultipartForm(10e6) + files := r.MultipartForm.File["file"] + file, _ := files[0].Open() + b, _ := io.ReadAll(file) + r.ParseForm() + if a := r.FormValue("attempt"); a != "" && a != "2" { + w.WriteHeader(http.StatusInternalServerError) } - if string(bs) != body { - t.Errorf("body = %s; want = %s", bs, body) + w.Write(b) + case "/form": + r.ParseForm() + ret, _ := json.Marshal(&r.Form) + w.Header().Set(header.ContentType, header.JsonContentType) + w.Write(ret) + case "/multipart": + r.ParseMultipartForm(10e6) + m := make(map[string]interface{}) + m["values"] = r.MultipartForm.Value + m["files"] = r.MultipartForm.File + ret, _ := json.Marshal(&m) + w.Header().Set(header.ContentType, header.JsonContentType) + w.Write(ret) + case "/search": + handleSearch(w, r) + case "/redirect": + io.Copy(io.Discard, r.Body) + w.Header().Set(header.Location, "/") + w.WriteHeader(http.StatusMovedPermanently) + case "/content-type": + io.Copy(io.Discard, r.Body) + w.Write([]byte(r.Header.Get(header.ContentType))) + case "/echo": + b, _ := io.ReadAll(r.Body) + e := Echo{ + Header: r.Header, + Body: string(b), } + w.Header().Set(header.ContentType, header.JsonContentType) + result, _ := json.Marshal(&e) + w.Write(result) } - ts := httptest.NewServer(http.HandlerFunc(handler)) - - // string - _, err := Post(ts.URL, body) - if err != nil { - t.Fatal(err) - } +} - // []byte - _, err = Post(ts.URL, []byte(body)) - if err != nil { - t.Fatal(err) - } +func handleGetUserProfile(w http.ResponseWriter, r *http.Request) { + user := strings.TrimLeft(r.URL.Path, "/user") + user = strings.TrimSuffix(user, "/profile") + 
w.Write([]byte(fmt.Sprintf("%s's profile", user))) +} - // *bytes.Buffer - var buf bytes.Buffer - buf.WriteString(body) - _, err = Post(ts.URL, &buf) - if err != nil { - t.Fatal(err) - } +type UserInfo struct { + Username string `json:"username" xml:"username"` + Email string `json:"email" xml:"email"` +} - // io.Reader - _, err = Post(ts.URL, strings.NewReader(body)) - if err != nil { - t.Fatal(err) - } +type ErrorMessage struct { + ErrorCode int `json:"error_code" xml:"ErrorCode"` + ErrorMessage string `json:"error_message" xml:"ErrorMessage"` } -func TestBodyJSON(t *testing.T) { - type content struct { - Code int `json:"code"` - Msg string `json:"msg"` - } - c := content{ - Code: 1, - Msg: "ok", - } - checkData := func(data []byte) { - var cc content - err := json.Unmarshal(data, &cc) - if err != nil { - t.Fatal(err) +func handleSearch(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + username := r.FormValue("username") + tp := r.FormValue("type") + var marshalFunc func(v interface{}) ([]byte, error) + if tp == "xml" { + w.Header().Set(header.ContentType, header.XmlContentType) + marshalFunc = xml.Marshal + } else { + w.Header().Set(header.ContentType, header.JsonContentType) + marshalFunc = json.Marshal + } + var result interface{} + switch username { + case "": + w.WriteHeader(http.StatusBadRequest) + result = &ErrorMessage{ + ErrorCode: 10000, + ErrorMessage: "need username", } - if cc != c { - t.Errorf("request body = %+v; want = %+v", cc, c) + case "imroc": + w.WriteHeader(http.StatusOK) + result = &UserInfo{ + Username: "imroc", + Email: "roc@imroc.cc", } - } - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) + default: + w.WriteHeader(http.StatusNotFound) + result = &ErrorMessage{ + ErrorCode: 10001, + ErrorMessage: "username not exists", } - checkData(data) - }) - - ts := httptest.NewServer(handler) - resp, err := Post(ts.URL, BodyJSON(&c)) - if err != nil { - t.Fatal(err) } - checkData(resp.reqBody) + data, _ := marshalFunc(result) + w.Write(data) +} - SetJSONEscapeHTML(false) - SetJSONIndent("", "\t") - resp, err = Put(ts.URL, BodyJSON(&c)) - if err != nil { - t.Fatal(err) +func toGbk(s string) []byte { + reader := transform.NewReader(strings.NewReader(s), simplifiedchinese.GBK.NewEncoder()) + d, e := io.ReadAll(reader) + if e != nil { + panic(e) } - checkData(resp.reqBody) + return d } -func TestBodyXML(t *testing.T) { - type content struct { - Code int `xml:"code"` - Msg string `xml:"msg"` - } - c := content{ - Code: 1, - Msg: "ok", - } - checkData := func(data []byte) { - var cc content - err := xml.Unmarshal(data, &cc) +func handleGet(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/": + w.Write([]byte("TestGet: text response")) + case "/status": + r.ParseForm() + code := r.FormValue("code") + codeInt, err := strconv.Atoi(code) if err != nil { - t.Fatal(err) + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(err.Error())) + return } - if cc != c { - t.Errorf("request body = %+v; want = %+v", cc, c) + w.WriteHeader(codeInt) + case "/urlencode": + info := &UserInfo{ + Username: "鎴戞槸roc", + Email: "roc@imroc.cc", } - } - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, err := ioutil.ReadAll(r.Body) + bs, err := json.Marshal(info) if err != nil { - t.Fatal(err) + panic(err) } - checkData(data) - }) - - ts := httptest.NewServer(handler) - resp, err := Post(ts.URL, BodyXML(&c)) - if err != nil { - t.Fatal(err) - 
} - checkData(resp.reqBody) - - SetXMLIndent("", " ") - resp, err = Put(ts.URL, BodyXML(&c)) - if err != nil { - t.Fatal(err) - } - checkData(resp.reqBody) -} - -func TestHeader(t *testing.T) { - header := Header{ - "User-Agent": "V1.0.0", - "Authorization": "roc", - } - handler := func(w http.ResponseWriter, r *http.Request) { - for key, value := range header { - if v := r.Header.Get(key); value != v { - t.Errorf("header %q = %s; want = %s", key, v, value) - } + result := url.QueryEscape(string(bs)) + w.Write([]byte(result)) + case "/bad-request": + w.WriteHeader(http.StatusBadRequest) + case "/too-many": + w.WriteHeader(http.StatusTooManyRequests) + w.Header().Set(header.ContentType, header.JsonContentType) + w.Write([]byte(`{"errMsg":"too many requests"}`)) + case "/chunked": + w.Header().Add("Trailer", "Expires") + w.Write([]byte(`This is a chunked body`)) + case "/host-header": + w.Write([]byte(r.Host)) + case "/json": + r.ParseForm() + if r.FormValue("type") != "no" { + w.Header().Set(header.ContentType, header.JsonContentType) } - } - ts := httptest.NewServer(http.HandlerFunc(handler)) - _, err := Head(ts.URL, header) - if err != nil { - t.Fatal(err) - } - - httpHeader := make(http.Header) - for key, value := range header { - httpHeader.Add(key, value) - } - _, err = Head(ts.URL, httpHeader) - if err != nil { - t.Fatal(err) - } -} - -func TestUpload(t *testing.T) { - str := "hello req" - file := ioutil.NopCloser(strings.NewReader(str)) - upload := FileUpload{ - File: file, - FieldName: "media", - FileName: "hello.txt", - } - handler := func(w http.ResponseWriter, r *http.Request) { - mr, err := r.MultipartReader() + w.Header().Set(header.ContentType, header.JsonContentType) + if r.FormValue("error") == "yes" { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"message": "not allowed"}`)) + } else { + w.Write([]byte(`{"name": "roc"}`)) + } + case "/xml": + r.ParseForm() + if r.FormValue("type") != "no" { + w.Header().Set(header.ContentType, header.XmlContentType) + } + w.Write([]byte(`roc`)) + case "/unlimited-redirect": + w.Header().Set("Location", "/unlimited-redirect") + w.WriteHeader(http.StatusMovedPermanently) + case "/redirect-to-other": + w.Header().Set("Location", "http://dummy.local/test") + w.WriteHeader(http.StatusMovedPermanently) + case "/pragma": + w.Header().Add("Pragma", "no-cache") + case "/payload": + b, _ := io.ReadAll(r.Body) + w.Write(b) + case "/gbk": + w.Header().Set(header.ContentType, "text/plain; charset=gbk") + w.Write(toGbk("鎴戞槸roc")) + case "/gbk-no-charset": + b, err := os.ReadFile(tests.GetTestFilePath("sample-gbk.html")) if err != nil { - t.Fatal(err) + panic(err) } - for { - p, err := mr.NextPart() - if err != nil { - break - } - if p.FileName() != upload.FileName { - t.Errorf("filename = %s; want = %s", p.FileName(), upload.FileName) - } - if p.FormName() != upload.FieldName { - t.Errorf("formname = %s; want = %s", p.FileName(), upload.FileName) + w.Header().Set(header.ContentType, "text/html") + w.Write(b) + case "/header": + b, _ := json.Marshal(r.Header) + w.Header().Set(header.ContentType, header.JsonContentType) + w.Write(b) + case "/user-agent": + w.Write([]byte(r.Header.Get(header.UserAgent))) + case "/content-type": + w.Write([]byte(r.Header.Get(header.ContentType))) + case "/query-parameter": + w.Write([]byte(r.URL.RawQuery)) + case "/search": + handleSearch(w, r) + case "/download": + size := 100 * 1024 * 1024 + w.Header().Set("Content-Length", strconv.Itoa(size)) + buf := make([]byte, 1024) + for i := 0; i < 1024; i++ { + buf[i] = 
'h' + } + for i := 0; i < size; { + wbuf := buf + if size-i < 1024 { + wbuf = buf[:size-i] } - data, err := ioutil.ReadAll(p) + n, err := w.Write(wbuf) if err != nil { - t.Fatal(err) - } - if string(data) != str { - t.Errorf("file content = %s; want = %s", data, str) + break } + i += n + } + case "/protected": + auth := r.Header.Get("Authorization") + if auth == "Bearer goodtoken" { + w.Write([]byte("good")) + } else { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`bad`)) + } + default: + if strings.HasPrefix(r.URL.Path, "/user") { + handleGetUserProfile(w, r) } } - ts := httptest.NewServer(http.HandlerFunc(handler)) - _, err := Post(ts.URL, upload) - if err != nil { - t.Fatal(err) +} + +func assertStatus(t *testing.T, resp *Response, err error, statusCode int, status string) { + tests.AssertNoError(t, err) + tests.AssertNotNil(t, resp) + tests.AssertNotNil(t, resp.Body) + tests.AssertEqual(t, statusCode, resp.StatusCode) + tests.AssertEqual(t, status, resp.Status) +} + +func assertSuccess(t *testing.T, resp *Response, err error) { + tests.AssertNoError(t, err) + tests.AssertNotNil(t, resp.Response) + tests.AssertNotNil(t, resp.Response.Body) + tests.AssertEqual(t, http.StatusOK, resp.StatusCode) + tests.AssertEqual(t, "200 OK", resp.Status) + if !resp.IsSuccessState() { + t.Error("Response.IsSuccessState should return true") } - ts = newDefaultTestServer() - _, err = Post(ts.URL, File("*.go")) - if err != nil { - t.Fatal(err) +} + +func assertIsError(t *testing.T, resp *Response, err error) { + tests.AssertNoError(t, err) + tests.AssertNotNil(t, resp) + tests.AssertNotNil(t, resp.Body) + if !resp.IsErrorState() { + t.Error("Response.IsErrorState should return true") } } + +func TestTrailer(t *testing.T) { + resp, err := tc().EnableForceHTTP1().R().Get("/chunked") + assertSuccess(t, resp, err) + _, ok := resp.Trailer["Expires"] + if !ok { + t.Error("trailer not exists") + } +} + +func testWithAllTransport(t *testing.T, testFunc func(t *testing.T, c *Client)) { + testFunc(t, tc()) + testFunc(t, tc().EnableForceHTTP1()) +} diff --git a/request.go b/request.go new file mode 100644 index 00000000..9036d72a --- /dev/null +++ b/request.go @@ -0,0 +1,1229 @@ +package req + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + urlpkg "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/util" +) + +// Request struct is used to compose and fire individual request from +// req client. Request provides lots of chainable settings which can +// override client level settings. 
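+//
+// A typical chained call looks like this (the URL, header and query values
+// below are illustrative):
+//
+//	resp, err := client.R().
+//		SetHeader("Accept", "application/json").
+//		SetQueryParam("page", "1").
+//		Get("https://api.example.com/users")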
+type Request struct { + PathParams map[string]string + QueryParams urlpkg.Values + FormData urlpkg.Values + OrderedFormData []string + Headers http.Header + Cookies []*http.Cookie + Result interface{} + Error interface{} + RawRequest *http.Request + StartTime time.Time + RetryAttempt int + RawURL string // read only + Method string + Body []byte + GetBody GetContentFunc + // URL is an auto-generated field, and is nil in request middleware (OnBeforeRequest), + // consider using RawURL if you want, it's not nil in client middleware (WrapRoundTripFunc) + URL *urlpkg.URL + + isMultiPart bool + disableAutoReadResponse bool + forceChunkedEncoding bool + isSaveResponse bool + close bool + error error + client *Client + uploadCallback UploadCallback + uploadCallbackInterval time.Duration + downloadCallback DownloadCallback + downloadCallbackInterval time.Duration + unReplayableBody io.ReadCloser + retryOption *retryOption + bodyReadCloser io.ReadCloser + dumpOptions *DumpOptions + marshalBody interface{} + ctx context.Context + uploadFiles []*FileUpload + uploadReader []io.ReadCloser + outputFile string + output io.Writer + trace *clientTrace + dumpBuffer *bytes.Buffer + responseReturnTime time.Time + afterResponse []ResponseMiddleware +} + +type GetContentFunc func() (io.ReadCloser, error) + +func (r *Request) getHeader(key string) string { + if r.Headers == nil { + return "" + } + return r.Headers.Get(key) +} + +// TraceInfo returns the trace information, only available if trace is enabled +// (see Request.EnableTrace and Client.EnableTraceAll). +func (r *Request) TraceInfo() TraceInfo { + ct := r.trace + + if ct == nil { + return TraceInfo{} + } + + ti := TraceInfo{ + IsConnReused: ct.gotConnInfo.Reused, + IsConnWasIdle: ct.gotConnInfo.WasIdle, + ConnIdleTime: ct.gotConnInfo.IdleTime, + } + + endTime := ct.endTime + if endTime.IsZero() { // in case timeout + endTime = r.responseReturnTime + } + + if !ct.tlsHandshakeStart.IsZero() { + if !ct.tlsHandshakeDone.IsZero() { + ti.TLSHandshakeTime = ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart) + } else { + ti.TLSHandshakeTime = endTime.Sub(ct.tlsHandshakeStart) + } + } + + if ct.gotConnInfo.Reused { + ti.TotalTime = endTime.Sub(ct.getConn) + } else { + if ct.dnsStart.IsZero() { + ti.TotalTime = endTime.Sub(r.StartTime) + } else { + ti.TotalTime = endTime.Sub(ct.dnsStart) + } + } + + dnsDone := ct.dnsDone + if dnsDone.IsZero() { + dnsDone = endTime + } + + if !ct.dnsStart.IsZero() { + ti.DNSLookupTime = dnsDone.Sub(ct.dnsStart) + } + + // Only calculate on successful connections + if !ct.connectDone.IsZero() { + ti.TCPConnectTime = ct.connectDone.Sub(dnsDone) + } + + // Only calculate on successful connections + if !ct.gotConn.IsZero() { + ti.ConnectTime = ct.gotConn.Sub(ct.getConn) + } + + // Only calculate on successful connections + if !ct.gotFirstResponseByte.IsZero() { + ti.FirstResponseTime = ct.gotFirstResponseByte.Sub(ct.gotConn) + ti.ResponseTime = endTime.Sub(ct.gotFirstResponseByte) + } + + // Capture remote address info when connection is non-nil + if ct.gotConnInfo.Conn != nil { + ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr() + ti.LocalAddr = ct.gotConnInfo.Conn.LocalAddr() + } + + return ti +} + +// HeaderToString get all header as string. +func (r *Request) HeaderToString() string { + return convertHeaderToString(r.Headers) +} + +// SetURL set the url for request. 
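+//
+// It is mainly useful together with Do, which takes no URL itself, for
+// example (the URL below is illustrative):
+//
+//	resp := client.R().SetURL("https://api.example.com/profile").Do()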
+func (r *Request) SetURL(url string) *Request { + r.RawURL = url + return r +} + +// SetFormDataFromValues set the form data from url.Values, will not +// been used if request method does not allow payload. +func (r *Request) SetFormDataFromValues(data urlpkg.Values) *Request { + if r.FormData == nil { + r.FormData = urlpkg.Values{} + } + for k, v := range data { + for _, kv := range v { + r.FormData.Add(k, kv) + } + } + return r +} + +// SetFormData set the form data from a map, will not been used +// if request method does not allow payload. +func (r *Request) SetFormData(data map[string]string) *Request { + if r.FormData == nil { + r.FormData = urlpkg.Values{} + } + for k, v := range data { + r.FormData.Set(k, v) + } + return r +} + +// SetOrderedFormData set the ordered form data from key-values pairs. +func (r *Request) SetOrderedFormData(kvs ...string) *Request { + r.OrderedFormData = append(r.OrderedFormData, kvs...) + return r +} + +// SetFormDataAnyType set the form data from a map, which value could be any type, +// will convert to string automatically. +// It will not been used if request method does not allow payload. +func (r *Request) SetFormDataAnyType(data map[string]interface{}) *Request { + if r.FormData == nil { + r.FormData = urlpkg.Values{} + } + for k, v := range data { + r.FormData.Set(k, fmt.Sprint(v)) + } + return r +} + +// SetCookies set http cookies for the request. +func (r *Request) SetCookies(cookies ...*http.Cookie) *Request { + r.Cookies = append(r.Cookies, cookies...) + return r +} + +// SetQueryString set URL query parameters for the request using +// raw query string. +func (r *Request) SetQueryString(query string) *Request { + params, err := urlpkg.ParseQuery(strings.TrimSpace(query)) + if err != nil { + r.client.log.Warnf("failed to parse query string (%s): %v", query, err) + return r + } + if r.QueryParams == nil { + r.QueryParams = make(urlpkg.Values) + } + for p, v := range params { + for _, pv := range v { + r.QueryParams.Add(p, pv) + } + } + return r +} + +// SetFileReader set up a multipart form with a reader to upload file. +func (r *Request) SetFileReader(paramName, filename string, reader io.Reader) *Request { + r.SetFileUpload(FileUpload{ + ParamName: paramName, + FileName: filename, + GetFileContent: func() (io.ReadCloser, error) { + if rc, ok := reader.(io.ReadCloser); ok { + return rc, nil + } + return io.NopCloser(reader), nil + }, + }) + return r +} + +// SetFileBytes set up a multipart form with given []byte to upload. +func (r *Request) SetFileBytes(paramName, filename string, content []byte) *Request { + r.SetFileUpload(FileUpload{ + ParamName: paramName, + FileName: filename, + GetFileContent: func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(content)), nil + }, + }) + return r +} + +// SetFiles set up a multipart form from a map to upload, which +// key is the parameter name, and value is the file path. +func (r *Request) SetFiles(files map[string]string) *Request { + for k, v := range files { + r.SetFile(k, v) + } + return r +} + +// SetFile set up a multipart form from file path to upload, +// which read file from filePath automatically to upload. 
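+//
+// For example (the parameter name and file path below are illustrative):
+//
+//	client.R().
+//		SetFile("file", "/path/to/report.pdf").
+//		Post("https://api.example.com/upload")
+//
+// Use SetFileUpload instead if the upload needs fully customized options.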
+func (r *Request) SetFile(paramName, filePath string) *Request { + file, err := os.Open(filePath) + if err != nil { + r.client.log.Errorf("failed to open %s: %v", filePath, err) + r.appendError(err) + return r + } + fileInfo, err := os.Stat(filePath) + if err != nil { + r.client.log.Errorf("failed to stat file %s: %v", filePath, err) + r.appendError(err) + return r + } + r.isMultiPart = true + return r.SetFileUpload(FileUpload{ + ParamName: paramName, + FileName: filepath.Base(filePath), + GetFileContent: func() (io.ReadCloser, error) { + if r.RetryAttempt > 0 { + file, err = os.Open(filePath) + if err != nil { + return nil, err + } + } + return file, nil + }, + FileSize: fileInfo.Size(), + }) +} + +var ( + errMissingParamName = errors.New("missing param name in multipart file upload") + errMissingFileName = errors.New("missing filename in multipart file upload") + errMissingFileContent = errors.New("missing file content in multipart file upload") +) + +// SetFileUpload set the fully custimized multipart file upload options. +func (r *Request) SetFileUpload(uploads ...FileUpload) *Request { + r.isMultiPart = true + for _, upload := range uploads { + shouldAppend := true + if upload.ParamName == "" { + r.appendError(errMissingParamName) + shouldAppend = false + } + if upload.FileName == "" { + r.appendError(errMissingFileName) + shouldAppend = false + } + if upload.GetFileContent == nil { + r.appendError(errMissingFileContent) + shouldAppend = false + } + if shouldAppend { + r.uploadFiles = append(r.uploadFiles, &upload) + } + } + return r +} + +// SetUploadCallback set the UploadCallback which will be invoked at least +// every 200ms during file upload, usually used to show upload progress. +func (r *Request) SetUploadCallback(callback UploadCallback) *Request { + return r.SetUploadCallbackWithInterval(callback, 200*time.Millisecond) +} + +// SetUploadCallbackWithInterval set the UploadCallback which will be invoked at least +// every `minInterval` during file upload, usually used to show upload progress. +func (r *Request) SetUploadCallbackWithInterval(callback UploadCallback, minInterval time.Duration) *Request { + if callback == nil { + return r + } + r.forceChunkedEncoding = true + r.uploadCallback = callback + r.uploadCallbackInterval = minInterval + return r +} + +// SetDownloadCallback set the DownloadCallback which will be invoked at least +// every 200ms during file upload, usually used to show download progress. +func (r *Request) SetDownloadCallback(callback DownloadCallback) *Request { + return r.SetDownloadCallbackWithInterval(callback, 200*time.Millisecond) +} + +// SetDownloadCallbackWithInterval set the DownloadCallback which will be invoked at least +// every `minInterval` during file upload, usually used to show download progress. +func (r *Request) SetDownloadCallbackWithInterval(callback DownloadCallback, minInterval time.Duration) *Request { + if callback == nil { + return r + } + r.downloadCallback = callback + r.downloadCallbackInterval = minInterval + return r +} + +// SetResult set the result that response Body will be unmarshalled to if +// no error occurs and Response.ResultState() returns SuccessState, by default +// it requires HTTP status `code >= 200 && code <= 299`, you can also use +// Request.SetResultStateCheckFunc or Client.SetResultStateCheckFunc to customize +// the result state check logic. +// +// Deprecated: Use SetSuccessResult instead. 
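+//
+// For example, with the replacement API (UserInfo and APIError are
+// caller-defined types, shown here only as a sketch):
+//
+//	var user UserInfo
+//	var apiErr APIError
+//	resp, err := client.R().
+//		SetSuccessResult(&user).
+//		SetErrorResult(&apiErr).
+//		Get("https://api.example.com/search")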
+func (r *Request) SetResult(result interface{}) *Request { + return r.SetSuccessResult(result) +} + +// SetSuccessResult set the result that response Body will be unmarshalled to if +// no error occurs and Response.ResultState() returns SuccessState, by default +// it requires HTTP status `code >= 200 && code <= 299`, you can also use +// Request.SetResultStateCheckFunc or Client.SetResultStateCheckFunc to customize +// the result state check logic. +func (r *Request) SetSuccessResult(result interface{}) *Request { + if result == nil { + return r + } + r.Result = util.GetPointer(result) + return r +} + +// SetError set the result that response body will be unmarshalled to if +// no error occurs and Response.ResultState() returns ErrorState, by default +// it requires HTTP status `code >= 400`, you can also use Request.SetResultStateCheckFunc +// or Client.SetResultStateCheckFunc to customize the result state check logic. +// +// Deprecated: Use SetErrorResult result. +func (r *Request) SetError(err interface{}) *Request { + return r.SetErrorResult(err) +} + +// SetErrorResult set the result that response body will be unmarshalled to if +// no error occurs and Response.ResultState() returns ErrorState, by default +// it requires HTTP status `code >= 400`, you can also use Request.SetResultStateCheckFunc +// or Client.SetResultStateCheckFunc to customize the result state check logic. +func (r *Request) SetErrorResult(err interface{}) *Request { + if err == nil { + return r + } + r.Error = util.GetPointer(err) + return r +} + +// SetBearerAuthToken set bearer auth token for the request. +func (r *Request) SetBearerAuthToken(token string) *Request { + return r.SetHeader(header.Authorization, "Bearer "+token) +} + +// SetBasicAuth set basic auth for the request. +func (r *Request) SetBasicAuth(username, password string) *Request { + return r.SetHeader(header.Authorization, util.BasicAuthHeaderValue(username, password)) +} + +// SetDigestAuth sets the Digest Access auth scheme for the HTTP request. If a server responds with 401 and sends a +// Digest challenge in the WWW-Authenticate Header, the request will be resent with the appropriate Authorization Header. +// +// For Example: To set the Digest scheme with username "roc" and password "123456" +// +// client.R().SetDigestAuth("roc", "123456") +// +// Information about Digest Access Authentication can be found in RFC7616: +// +// https://datatracker.ietf.org/doc/html/rfc7616 +// +// This method overrides the username and password set by method `Client.SetCommonDigestAuth`. +func (r *Request) SetDigestAuth(username, password string) *Request { + r.OnAfterResponse(handleDigestAuthFunc(username, password)) + return r +} + +// OnAfterResponse add a response middleware which hooks after response received. +func (r *Request) OnAfterResponse(m ResponseMiddleware) *Request { + r.afterResponse = append(r.afterResponse, m) + return r +} + +// SetHeaders set headers from a map for the request. +func (r *Request) SetHeaders(hdrs map[string]string) *Request { + for k, v := range hdrs { + r.SetHeader(k, v) + } + return r +} + +// SetHeader set a header for the request. +func (r *Request) SetHeader(key, value string) *Request { + if r.Headers == nil { + r.Headers = make(http.Header) + } + r.Headers.Set(key, value) + return r +} + +// SetHeadersNonCanonical set headers from a map for the request which key is a +// non-canonical key (keep case unchanged), only valid for HTTP/1.1. 
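+//
+// For example (a sketch):
+//
+//	client.R().SetHeadersNonCanonical(map[string]string{
+//		"my-lowercase-header": "1",
+//	})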
+func (r *Request) SetHeadersNonCanonical(hdrs map[string]string) *Request { + for k, v := range hdrs { + r.SetHeaderNonCanonical(k, v) + } + return r +} + +// SetHeaderNonCanonical set a header for the request which key is a +// non-canonical key (keep case unchanged), only valid for HTTP/1.1. +func (r *Request) SetHeaderNonCanonical(key, value string) *Request { + if r.Headers == nil { + r.Headers = make(http.Header) + } + r.Headers[key] = append(r.Headers[key], value) + return r +} + +const ( + // HeaderOderKey is the key of header order, which specifies the order + // of the http header. + HeaderOderKey = "__header_order__" + // PseudoHeaderOderKey is the key of pseudo header order, which specifies + // the order of the http2 and http3 pseudo header. + PseudoHeaderOderKey = "__pseudo_header_order__" +) + +// SetHeaderOrder set the order of the http header (case-insensitive). +// For example: +// +// client.R().SetHeaderOrder( +// "custom-header", +// "cookie", +// "user-agent", +// "accept-encoding", +// ) +func (r *Request) SetHeaderOrder(keys ...string) *Request { + if r.Headers == nil { + r.Headers = make(http.Header) + } + r.Headers[HeaderOderKey] = append(r.Headers[HeaderOderKey], keys...) + return r +} + +// SetPseudoHeaderOrder set the order of the pseudo http header (case-insensitive). +// Note this is only valid for http2 and http3. +// For example: +// +// client.R().SetPseudoHeaderOrder( +// ":scheme", +// ":authority", +// ":path", +// ":method", +// ) +func (r *Request) SetPseudoHeaderOrder(keys ...string) *Request { + if r.Headers == nil { + r.Headers = make(http.Header) + } + r.Headers[PseudoHeaderOderKey] = append(r.Headers[PseudoHeaderOderKey], keys...) + return r +} + +// SetOutputFile set the file that response Body will be downloaded to. +func (r *Request) SetOutputFile(file string) *Request { + r.isSaveResponse = true + r.outputFile = file + return r +} + +// SetOutput set the io.Writer that response Body will be downloaded to. +func (r *Request) SetOutput(output io.Writer) *Request { + if output == nil { + r.client.log.Warnf("nil io.Writer is not allowed in SetOutput") + return r + } + r.output = output + r.isSaveResponse = true + return r +} + +// SetQueryParams set URL query parameters from a map for the request. +func (r *Request) SetQueryParams(params map[string]string) *Request { + for k, v := range params { + r.SetQueryParam(k, v) + } + return r +} + +// SetQueryParamsAnyType set URL query parameters from a map for the request. +// The value of map is any type, will be convert to string automatically. +func (r *Request) SetQueryParamsAnyType(params map[string]interface{}) *Request { + for k, v := range params { + r.SetQueryParam(k, fmt.Sprint(v)) + } + return r +} + +// SetQueryParam set an URL query parameter for the request. +func (r *Request) SetQueryParam(key, value string) *Request { + if r.QueryParams == nil { + r.QueryParams = make(urlpkg.Values) + } + r.QueryParams.Set(key, value) + return r +} + +// AddQueryParam add a URL query parameter for the request. +func (r *Request) AddQueryParam(key, value string) *Request { + if r.QueryParams == nil { + r.QueryParams = make(urlpkg.Values) + } + r.QueryParams.Add(key, value) + return r +} + +// AddQueryParams add one or more values of specified URL query parameter for the request. +func (r *Request) AddQueryParams(key string, values ...string) *Request { + if r.QueryParams == nil { + r.QueryParams = make(urlpkg.Values) + } + vs := r.QueryParams[key] + vs = append(vs, values...) 
+ r.QueryParams[key] = vs + return r +} + +// SetPathParams set URL path parameters from a map for the request. +func (r *Request) SetPathParams(params map[string]string) *Request { + for key, value := range params { + r.SetPathParam(key, value) + } + return r +} + +// SetPathParam set a URL path parameter for the request. +func (r *Request) SetPathParam(key, value string) *Request { + if r.PathParams == nil { + r.PathParams = make(map[string]string) + } + r.PathParams[key] = value + return r +} + +func (r *Request) appendError(err error) { + r.error = multierror.Append(r.error, err) +} + +var errRetryableWithUnReplayableBody = errors.New("retryable request should not have unreplayable Body (io.Reader)") + +func (r *Request) newErrorResponse(err error) *Response { + resp := &Response{Request: r} + resp.Err = err + return resp +} + +// Do fires http request, 0 or 1 context is allowed, and returns the *Response which +// is always not nil, and Response.Err is not nil if error occurs. +func (r *Request) Do(ctx ...context.Context) *Response { + if len(ctx) > 0 && ctx[0] != nil { + r.ctx = ctx[0] + } + + defer func() { + r.responseReturnTime = time.Now() + }() + if r.error != nil { + return r.newErrorResponse(r.error) + } + if r.retryOption != nil && r.retryOption.MaxRetries != 0 && r.unReplayableBody != nil { // retryable request should not have unreplayable Body + return r.newErrorResponse(errRetryableWithUnReplayableBody) + } + resp, _ := r.do() + return resp +} + +func (r *Request) do() (resp *Response, err error) { + defer func() { + if resp == nil { + resp = &Response{Request: r} + } + if err != nil && resp.Err == nil { + resp.Err = err + } + }() + + for { + if r.Headers == nil { + r.Headers = make(http.Header) + } + for _, f := range r.client.udBeforeRequest { + if err = f(r.client, r); err != nil { + return + } + } + for _, f := range r.client.beforeRequest { + if err = f(r.client, r); err != nil { + return + } + } + + if r.client.wrappedRoundTrip != nil { + resp, err = r.client.wrappedRoundTrip.RoundTrip(r) + } else { + resp, err = r.client.roundTrip(r) + } + + // Determine if the error is from a canceled context. + // Store it here so it doesn't get lost when processing the AfterResponse middleware. + contextCanceled := errors.Is(err, context.Canceled) + + for _, f := range r.afterResponse { + if err = f(r.client, resp); err != nil { + return + } + } + + if contextCanceled || r.retryOption == nil || (r.RetryAttempt >= r.retryOption.MaxRetries && r.retryOption.MaxRetries >= 0) { // absolutely cannot retry. + return + } + + // check retry whether is needed. + needRetry := err != nil // default behaviour: retry if error occurs + if l := len(r.retryOption.RetryConditions); l > 0 { // override default behaviour if custom RetryConditions has been set. + for i := l - 1; i >= 0; i-- { + needRetry = r.retryOption.RetryConditions[i](resp, err) + if needRetry { + break + } + } + } + if !needRetry { // no retry is needed. 
+ return + } + + // need retry, attempt to retry + r.RetryAttempt++ + if l := len(r.retryOption.RetryHooks); l > 0 { + for i := l - 1; i >= 0; i-- { // run retry hooks in reverse order + r.retryOption.RetryHooks[i](resp, err) + } + } + time.Sleep(r.retryOption.GetRetryInterval(resp, r.RetryAttempt)) + + // clean up before retry + if r.dumpBuffer != nil { + r.dumpBuffer.Reset() + } + if r.trace != nil { + r.trace = &clientTrace{} + } + resp.body = nil + resp.result = nil + resp.error = nil + } +} + +// Send fires http request with specified method and url, returns the +// *Response which is always not nil, and the error is not nil if error occurs. +func (r *Request) Send(method, url string) (*Response, error) { + r.Method = method + r.RawURL = url + resp := r.Do() + if resp.Err != nil && r.client.onError != nil { + r.client.onError(r.client, r, resp, resp.Err) + } + return resp, resp.Err +} + +// MustGet like Get, panic if error happens, should only be used to +// test without error handling. +func (r *Request) MustGet(url string) *Response { + resp, err := r.Get(url) + if err != nil { + panic(err) + } + return resp +} + +// Get fires http request with GET method and the specified URL. +func (r *Request) Get(url string) (*Response, error) { + return r.Send(http.MethodGet, url) +} + +// MustPost like Post, panic if error happens. should only be used to +// test without error handling. +func (r *Request) MustPost(url string) *Response { + resp, err := r.Post(url) + if err != nil { + panic(err) + } + return resp +} + +// Post fires http request with POST method and the specified URL. +func (r *Request) Post(url string) (*Response, error) { + return r.Send(http.MethodPost, url) +} + +// MustPut like Put, panic if error happens, should only be used to +// test without error handling. +func (r *Request) MustPut(url string) *Response { + resp, err := r.Put(url) + if err != nil { + panic(err) + } + return resp +} + +// Put fires http request with PUT method and the specified URL. +func (r *Request) Put(url string) (*Response, error) { + return r.Send(http.MethodPut, url) +} + +// MustPatch like Patch, panic if error happens, should only be used +// to test without error handling. +func (r *Request) MustPatch(url string) *Response { + resp, err := r.Patch(url) + if err != nil { + panic(err) + } + return resp +} + +// Patch fires http request with PATCH method and the specified URL. +func (r *Request) Patch(url string) (*Response, error) { + return r.Send(http.MethodPatch, url) +} + +// MustDelete like Delete, panic if error happens, should only be used +// to test without error handling. +func (r *Request) MustDelete(url string) *Response { + resp, err := r.Delete(url) + if err != nil { + panic(err) + } + return resp +} + +// Delete fires http request with DELETE method and the specified URL. +func (r *Request) Delete(url string) (*Response, error) { + return r.Send(http.MethodDelete, url) +} + +// MustOptions like Options, panic if error happens, should only be +// used to test without error handling. +func (r *Request) MustOptions(url string) *Response { + resp, err := r.Options(url) + if err != nil { + panic(err) + } + return resp +} + +// Options fires http request with OPTIONS method and the specified URL. +func (r *Request) Options(url string) (*Response, error) { + return r.Send(http.MethodOptions, url) +} + +// MustHead like Head, panic if error happens, should only be used +// to test without error handling. 
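+// For example (illustrative URL):
+//
+//	resp := client.R().MustHead("https://httpbin.org/get")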
+func (r *Request) MustHead(url string) *Response { + resp, err := r.Send(http.MethodHead, url) + if err != nil { + panic(err) + } + return resp +} + +// Head fires http request with HEAD method and the specified URL. +func (r *Request) Head(url string) (*Response, error) { + return r.Send(http.MethodHead, url) +} + +// SetBody set the request Body, accepts string, []byte, io.Reader, map and struct. +func (r *Request) SetBody(body interface{}) *Request { + if body == nil { + return r + } + switch b := body.(type) { + case io.ReadCloser: + r.unReplayableBody = b + r.GetBody = func() (io.ReadCloser, error) { + return r.unReplayableBody, nil + } + case io.Reader: + r.unReplayableBody = io.NopCloser(b) + r.GetBody = func() (io.ReadCloser, error) { + return r.unReplayableBody, nil + } + case []byte: + r.SetBodyBytes(b) + case string: + r.SetBodyString(b) + case func() (io.ReadCloser, error): + r.GetBody = b + case GetContentFunc: + r.GetBody = b + default: + t := reflect.TypeOf(body) + switch t.Kind() { + case reflect.Ptr, reflect.Struct, reflect.Map, reflect.Slice, reflect.Array: + r.marshalBody = body + default: + r.SetBodyString(fmt.Sprint(body)) + } + } + return r +} + +// SetBodyBytes set the request Body as []byte. +func (r *Request) SetBodyBytes(body []byte) *Request { + r.Body = body + r.GetBody = func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(body)), nil + } + return r +} + +// SetBodyString set the request Body as string. +func (r *Request) SetBodyString(body string) *Request { + return r.SetBodyBytes([]byte(body)) +} + +// SetBodyJsonString set the request Body as string and set Content-Type header +// as "application/json; charset=utf-8" +func (r *Request) SetBodyJsonString(body string) *Request { + return r.SetBodyJsonBytes([]byte(body)) +} + +// SetBodyJsonBytes set the request Body as []byte and set Content-Type header +// as "application/json; charset=utf-8" +func (r *Request) SetBodyJsonBytes(body []byte) *Request { + r.SetContentType(header.JsonContentType) + return r.SetBodyBytes(body) +} + +// SetBodyJsonMarshal set the request Body that marshaled from object, and +// set Content-Type header as "application/json; charset=utf-8" +func (r *Request) SetBodyJsonMarshal(v interface{}) *Request { + b, err := r.client.jsonMarshal(v) + if err != nil { + r.appendError(err) + return r + } + return r.SetBodyJsonBytes(b) +} + +// SetBodyXmlString set the request Body as string and set Content-Type header +// as "text/xml; charset=utf-8" +func (r *Request) SetBodyXmlString(body string) *Request { + return r.SetBodyXmlBytes([]byte(body)) +} + +// SetBodyXmlBytes set the request Body as []byte and set Content-Type header +// as "text/xml; charset=utf-8" +func (r *Request) SetBodyXmlBytes(body []byte) *Request { + r.SetContentType(header.XmlContentType) + return r.SetBodyBytes(body) +} + +// SetBodyXmlMarshal set the request Body that marshaled from object, and +// set Content-Type header as "text/xml; charset=utf-8" +func (r *Request) SetBodyXmlMarshal(v interface{}) *Request { + b, err := r.client.xmlMarshal(v) + if err != nil { + r.appendError(err) + return r + } + return r.SetBodyXmlBytes(b) +} + +// SetContentType set the `Content-Type` for the request. +func (r *Request) SetContentType(contentType string) *Request { + return r.SetHeader(header.ContentType, contentType) +} + +// Context method returns the Context if its already set in request +// otherwise it creates new one using `context.Background()`. 
+func (r *Request) Context() context.Context { + if r.ctx == nil { + r.ctx = context.Background() + } + return r.ctx +} + +// SetContext method sets the context.Context for current Request. It allows +// to interrupt the request execution if ctx.Done() channel is closed. +// See https://blog.golang.org/context article and the "context" package +// documentation. +// +// Attention: make sure call SetContext before EnableDumpXXX if you want to +// dump at the request level. +func (r *Request) SetContext(ctx context.Context) *Request { + if ctx != nil { + r.ctx = ctx + } + return r +} + +// SetContextData sets the key-value pair data for current Request, so you +// can access some extra context info for current Request in hook or middleware. +func (r *Request) SetContextData(key, val any) *Request { + r.ctx = context.WithValue(r.Context(), key, val) + return r +} + +// GetContextData returns the context data of specified key, which set by SetContextData. +func (r *Request) GetContextData(key any) any { + return r.Context().Value(key) +} + +// DisableAutoReadResponse disable read response body automatically (enabled by default). +func (r *Request) DisableAutoReadResponse() *Request { + r.disableAutoReadResponse = true + return r +} + +// EnableAutoReadResponse enable read response body automatically (enabled by default). +func (r *Request) EnableAutoReadResponse() *Request { + r.disableAutoReadResponse = false + return r +} + +// DisableTrace disables trace. +func (r *Request) DisableTrace() *Request { + r.trace = nil + return r +} + +// EnableTrace enables trace (http3 currently does not support trace). +func (r *Request) EnableTrace() *Request { + if r.trace == nil { + r.trace = &clientTrace{} + } + return r +} + +func (r *Request) getDumpBuffer() *bytes.Buffer { + if r.dumpBuffer == nil { + r.dumpBuffer = new(bytes.Buffer) + } + return r.dumpBuffer +} + +func (r *Request) getDumpOptions() *DumpOptions { + if r.dumpOptions == nil { + r.dumpOptions = &DumpOptions{ + RequestHeader: true, + RequestBody: true, + ResponseHeader: true, + ResponseBody: true, + Output: r.getDumpBuffer(), + } + } + return r.dumpOptions +} + +// EnableDumpTo enables dump and save to the specified io.Writer. +func (r *Request) EnableDumpTo(output io.Writer) *Request { + r.getDumpOptions().Output = output + return r.EnableDump() +} + +// EnableDumpToFile enables dump and save to the specified filename. +func (r *Request) EnableDumpToFile(filename string) *Request { + file, err := os.Create(filename) + if err != nil { + r.appendError(err) + return r + } + r.getDumpOptions().Output = file + return r.EnableDump() +} + +// SetDumpOptions sets DumpOptions at request level. +func (r *Request) SetDumpOptions(opt *DumpOptions) *Request { + if opt == nil { + return r + } + if opt.Output == nil { + opt.Output = r.getDumpBuffer() + } + if r.dumpOptions != nil { + *r.dumpOptions = *opt + } else { + r.dumpOptions = opt + } + return r +} + +// EnableDump enables dump, including all content for the request and response by default. +func (r *Request) EnableDump() *Request { + return r.SetContext(context.WithValue(r.Context(), dump.DumperKey, newDumper(r.getDumpOptions()))) +} + +// EnableDumpWithoutBody enables dump only header for the request and response. +func (r *Request) EnableDumpWithoutBody() *Request { + o := r.getDumpOptions() + o.RequestBody = false + o.ResponseBody = false + return r.EnableDump() +} + +// EnableDumpWithoutHeader enables dump only Body for the request and response. 
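+// For example (illustrative URL):
+//
+//	resp, err := client.R().EnableDumpWithoutHeader().Get("https://httpbin.org/get")
+//	if err == nil {
+//		fmt.Println(resp.Dump()) // dump contains only the request and response bodies
+//	}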
+func (r *Request) EnableDumpWithoutHeader() *Request { + o := r.getDumpOptions() + o.RequestHeader = false + o.ResponseHeader = false + return r.EnableDump() +} + +// EnableDumpWithoutResponse enables dump only request. +func (r *Request) EnableDumpWithoutResponse() *Request { + o := r.getDumpOptions() + o.ResponseHeader = false + o.ResponseBody = false + return r.EnableDump() +} + +// EnableDumpWithoutRequest enables dump only response. +func (r *Request) EnableDumpWithoutRequest() *Request { + o := r.getDumpOptions() + o.RequestHeader = false + o.RequestBody = false + return r.EnableDump() +} + +// EnableDumpWithoutRequestBody enables dump with request Body excluded, +// can be used in upload request to avoid dump the unreadable binary content. +func (r *Request) EnableDumpWithoutRequestBody() *Request { + o := r.getDumpOptions() + o.RequestBody = false + return r.EnableDump() +} + +// EnableDumpWithoutResponseBody enables dump with response Body excluded, +// can be used in download request to avoid dump the unreadable binary content. +func (r *Request) EnableDumpWithoutResponseBody() *Request { + o := r.getDumpOptions() + o.ResponseBody = false + return r.EnableDump() +} + +// EnableForceChunkedEncoding enables force using chunked encoding when uploading. +func (r *Request) EnableForceChunkedEncoding() *Request { + r.forceChunkedEncoding = true + return r +} + +// DisableForceChunkedEncoding disables force using chunked encoding when uploading. +func (r *Request) DisableForceChunkedEncoding() *Request { + r.forceChunkedEncoding = false + return r +} + +// EnableForceMultipart enables force using multipart to upload form data. +func (r *Request) EnableForceMultipart() *Request { + r.isMultiPart = true + return r +} + +// DisableForceMultipart disables force using multipart to upload form data. +func (r *Request) DisableForceMultipart() *Request { + r.isMultiPart = false + return r +} + +func (r *Request) getRetryOption() *retryOption { + if r.retryOption == nil { + r.retryOption = newDefaultRetryOption() + } + return r.retryOption +} + +// SetRetryCount enables retry and set the maximum retry count. +// It will retry infinitely if count is negative. +func (r *Request) SetRetryCount(count int) *Request { + r.getRetryOption().MaxRetries = count + return r +} + +// SetRetryInterval sets the custom GetRetryIntervalFunc, you can use this to +// implement your own backoff retry algorithm. +// For example: +// +// req.SetRetryInterval(func(resp *req.Response, attempt int) time.Duration { +// sleep := 0.01 * math.Exp2(float64(attempt)) +// return time.Duration(math.Min(2, sleep)) * time.Second +// }) +func (r *Request) SetRetryInterval(getRetryIntervalFunc GetRetryIntervalFunc) *Request { + r.getRetryOption().GetRetryInterval = getRetryIntervalFunc + return r +} + +// SetRetryFixedInterval set retry to use a fixed interval. +func (r *Request) SetRetryFixedInterval(interval time.Duration) *Request { + r.getRetryOption().GetRetryInterval = func(resp *Response, attempt int) time.Duration { + return interval + } + return r +} + +// SetRetryBackoffInterval set retry to use a capped exponential backoff with jitter. +// https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ +func (r *Request) SetRetryBackoffInterval(min, max time.Duration) *Request { + r.getRetryOption().GetRetryInterval = backoffInterval(min, max) + return r +} + +// SetRetryHook set the retry hook which will be executed before a retry. 
+// It will override other retry hooks if any been added before (including +// client-level retry hooks). +func (r *Request) SetRetryHook(hook RetryHookFunc) *Request { + r.getRetryOption().RetryHooks = []RetryHookFunc{hook} + return r +} + +// AddRetryHook adds a retry hook which will be executed before a retry. +func (r *Request) AddRetryHook(hook RetryHookFunc) *Request { + ro := r.getRetryOption() + ro.RetryHooks = append(ro.RetryHooks, hook) + return r +} + +// SetRetryCondition sets the retry condition, which determines whether the +// request should retry. +// It will override other retry conditions if any been added before (including +// client-level retry conditions). +func (r *Request) SetRetryCondition(condition RetryConditionFunc) *Request { + r.getRetryOption().RetryConditions = []RetryConditionFunc{condition} + return r +} + +// AddRetryCondition adds a retry condition, which determines whether the +// request should retry. +func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request { + ro := r.getRetryOption() + ro.RetryConditions = append(ro.RetryConditions, condition) + return r +} + +// SetClient change the client of request dynamically. +func (r *Request) SetClient(client *Client) *Request { + if client != nil { + r.client = client + } + return r +} + +// GetClient returns the current client used by request. +func (r *Request) GetClient() *Client { + return r.client +} + +// EnableCloseConnection closes the connection after sending this +// request and reading its response if set to true in HTTP/1.1 and +// HTTP/2. +// +// Setting this field prevents re-use of TCP connections between +// requests to the same hosts event if EnableKeepAlives() were called. +func (r *Request) EnableCloseConnection() *Request { + r.close = true + return r +} diff --git a/request_test.go b/request_test.go new file mode 100644 index 00000000..a326e1f2 --- /dev/null +++ b/request_test.go @@ -0,0 +1,1025 @@ +package req + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/imroc/req/v3/internal/header" + "github.com/imroc/req/v3/internal/tests" +) + +func TestMustSendMethods(t *testing.T) { + c := tc() + testCases := []struct { + SendReq func(req *Request, url string) *Response + ExpectMethod string + }{ + { + SendReq: func(req *Request, url string) *Response { + return req.MustGet(url) + }, + ExpectMethod: "GET", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustPost(url) + }, + ExpectMethod: "POST", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustPatch(url) + }, + ExpectMethod: "PATCH", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustDelete(url) + }, + ExpectMethod: "DELETE", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustOptions(url) + }, + ExpectMethod: "OPTIONS", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustPut(url) + }, + ExpectMethod: "PUT", + }, + { + SendReq: func(req *Request, url string) *Response { + return req.MustHead(url) + }, + ExpectMethod: "HEAD", + }, + } + + for _, tc := range testCases { + testMethod(t, c, func(req *Request) *Response { + return tc.SendReq(req, "/") + }, tc.ExpectMethod, false) + } + + // test panic + for _, tc := range testCases { + testMethod(t, c, func(req *Request) *Response { + return tc.SendReq(req, "/\r\n") + }, tc.ExpectMethod, true) + } +} + +func TestSendMethods(t 
*testing.T) { + c := tc() + testCases := []struct { + SendReq func(req *Request) (resp *Response, err error) + ExpectMethod string + }{ + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Get("/") + }, + ExpectMethod: "GET", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Post("/") + }, + ExpectMethod: "POST", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Put("/") + }, + ExpectMethod: "PUT", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Patch("/") + }, + ExpectMethod: "PATCH", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Delete("/") + }, + ExpectMethod: "DELETE", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Options("/") + }, + ExpectMethod: "OPTIONS", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Head("/") + }, + ExpectMethod: "HEAD", + }, + { + SendReq: func(req *Request) (resp *Response, err error) { + return req.Send("GET", "/") + }, + ExpectMethod: "GET", + }, + } + for _, tc := range testCases { + testMethod(t, c, func(req *Request) *Response { + resp, err := tc.SendReq(req) + if err != nil { + t.Errorf("%s %s: %s", req.Method, req.RawURL, err.Error()) + } + return resp + }, tc.ExpectMethod, false) + } +} + +func testMethod(t *testing.T, c *Client, sendReq func(*Request) *Response, expectMethod string, expectPanic bool) { + r := c.R() + if expectPanic { + defer func() { + if err := recover(); err == nil { + t.Errorf("Must mehod %s should panic", expectMethod) + } + }() + } + resp := sendReq(r) + method := resp.Header.Get("Method") + if expectMethod != method { + t.Errorf("Expect method %s, got method %s", expectMethod, method) + } +} + +type dumpExpected struct { + ReqHeader bool + ReqBody bool + RespHeader bool + RespBody bool +} + +func testEnableDump(t *testing.T, fn func(r *Request) (de dumpExpected)) { + testDump := func(c *Client) { + r := c.R() + de := fn(r) + resp, err := r.SetBody(`test body`).Post("/") + assertSuccess(t, resp, err) + dump := resp.Dump() + tests.AssertContains(t, dump, "user-agent", de.ReqHeader) + tests.AssertContains(t, dump, "test body", de.ReqBody) + tests.AssertContains(t, dump, "date", de.RespHeader) + tests.AssertContains(t, dump, "testpost: text response", de.RespBody) + } + c := tc() + testDump(c) + testDump(c.EnableForceHTTP1()) +} + +func TestEnableDump(t *testing.T) { + testCases := []func(r *Request) (d dumpExpected){ + func(r *Request) (de dumpExpected) { + r.EnableDump() + de.ReqHeader = true + de.ReqBody = true + de.RespHeader = true + de.RespBody = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutHeader() + de.ReqBody = true + de.RespBody = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutBody() + de.ReqHeader = true + de.RespHeader = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutRequest() + de.RespHeader = true + de.RespBody = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutRequestBody() + de.ReqHeader = true + de.RespHeader = true + de.RespBody = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutResponse() + de.ReqHeader = true + de.ReqBody = true + return + }, + func(r *Request) (de dumpExpected) { + r.EnableDumpWithoutResponseBody() + de.ReqHeader = true + de.ReqBody = true + de.RespHeader = true + return + }, + func(r *Request) (de dumpExpected) { + 
r.SetDumpOptions(&DumpOptions{ + RequestHeader: true, + RequestBody: true, + ResponseBody: true, + }).EnableDump() + de.ReqHeader = true + de.ReqBody = true + de.RespBody = true + return + }, + } + for _, fn := range testCases { + testEnableDump(t, fn) + } +} + +func TestEnableDumpTo(t *testing.T) { + buff := new(bytes.Buffer) + resp, err := tc().R().EnableDumpTo(buff).Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, buff.Len() > 0) +} + +func TestEnableDumpToFIle(t *testing.T) { + tmpFile := "tmp_dumpfile_req" + resp, err := tc().R().EnableDumpToFile(tests.GetTestFilePath(tmpFile)).Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, len(getTestFileContent(t, tmpFile)) > 0) + os.Remove(tests.GetTestFilePath(tmpFile)) +} + +func TestBadRequest(t *testing.T) { + resp, err := tc().R().Get("/bad-request") + assertStatus(t, resp, err, http.StatusBadRequest, "400 Bad Request") +} + +func TestSetBodyMarshal(t *testing.T) { + username := "imroc" + type User struct { + Username string `json:"username" xml:"username"` + } + + assertUsernameJson := func(body []byte) { + var user User + err := json.Unmarshal(body, &user) + tests.AssertNoError(t, err) + tests.AssertEqual(t, username, user.Username) + } + assertUsernameXml := func(body []byte) { + var user User + err := xml.Unmarshal(body, &user) + tests.AssertNoError(t, err) + tests.AssertEqual(t, username, user.Username) + } + + testCases := []struct { + Set func(r *Request) + Assert func(body []byte) + }{ + { // SetBody with map + Set: func(r *Request) { + m := map[string]interface{}{ + "username": username, + } + r.SetBody(&m) + }, + Assert: assertUsernameJson, + }, + { // SetBody with struct + Set: func(r *Request) { + var user User + user.Username = username + r.SetBody(&user) + }, + Assert: assertUsernameJson, + }, + { // SetBody with struct use xml + Set: func(r *Request) { + var user User + user.Username = username + r.SetBody(&user).SetContentType(header.XmlContentType) + }, + Assert: assertUsernameXml, + }, + { // SetBodyJsonMarshal with map + Set: func(r *Request) { + m := map[string]interface{}{ + "username": username, + } + r.SetBodyJsonMarshal(&m) + }, + Assert: assertUsernameJson, + }, + { // SetBodyJsonMarshal with struct + Set: func(r *Request) { + var user User + user.Username = username + r.SetBodyJsonMarshal(&user) + }, + Assert: assertUsernameJson, + }, + { // SetBodyXmlMarshal with struct + Set: func(r *Request) { + var user User + user.Username = username + r.SetBodyXmlMarshal(&user) + }, + Assert: assertUsernameXml, + }, + } + + c := tc() + for _, tc := range testCases { + r := c.R() + tc.Set(r) + var e Echo + resp, err := r.SetSuccessResult(&e).Post("/echo") + assertSuccess(t, resp, err) + tc.Assert([]byte(e.Body)) + } +} + +func TestDoAPIStyle(t *testing.T) { + c := tc() + user := &UserInfo{} + url := "/search?username=imroc&type=json" + + err := c.Get().SetURL(url).Do().Into(user) + tests.AssertEqual(t, true, err == nil) + tests.AssertEqual(t, "imroc", user.Username) +} + +func TestSetSuccessResult(t *testing.T) { + c := tc() + var user *UserInfo + url := "/search?username=imroc&type=json" + + resp, err := c.R().SetSuccessResult(&user).Get(url) + assertSuccess(t, resp, err) + tests.AssertEqual(t, "imroc", user.Username) + + user = &UserInfo{} + resp, err = c.R().SetSuccessResult(user).Get(url) + assertSuccess(t, resp, err) + tests.AssertEqual(t, "imroc", user.Username) + + user = nil + resp, err = c.R().SetSuccessResult(user).Get(url) + assertSuccess(t, resp, err) + tests.AssertEqual(t, 
"imroc", resp.Result().(*UserInfo).Username) +} + +func TestSetBody(t *testing.T) { + body := "hello" + fn := func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBufferString(body)), nil + } + c := tc() + testCases := []struct { + SetBody func(r *Request) + ContentType string + }{ + { + SetBody: func(r *Request) { // SetBody with `func() (io.ReadCloser, error)` + r.SetBody(fn) + }, + }, + { + SetBody: func(r *Request) { // SetBody with GetContentFunc + r.SetBody(GetContentFunc(fn)) + }, + }, + { + SetBody: func(r *Request) { // SetBody with io.ReadCloser + r.SetBody(io.NopCloser(bytes.NewBufferString(body))) + }, + }, + { + SetBody: func(r *Request) { // SetBody with io.Reader + r.SetBody(bytes.NewBufferString(body)) + }, + }, + { + SetBody: func(r *Request) { // SetBody with string + r.SetBody(body) + }, + ContentType: header.PlainTextContentType, + }, + { + SetBody: func(r *Request) { // SetBody with []byte + r.SetBody([]byte(body)) + }, + ContentType: header.PlainTextContentType, + }, + { + SetBody: func(r *Request) { // SetBodyString + r.SetBodyString(body) + }, + ContentType: header.PlainTextContentType, + }, + { + SetBody: func(r *Request) { // SetBodyBytes + r.SetBodyBytes([]byte(body)) + }, + ContentType: header.PlainTextContentType, + }, + { + SetBody: func(r *Request) { // SetBodyJsonString + r.SetBodyJsonString(body) + }, + ContentType: header.JsonContentType, + }, + { + SetBody: func(r *Request) { // SetBodyJsonBytes + r.SetBodyJsonBytes([]byte(body)) + }, + ContentType: header.JsonContentType, + }, + { + SetBody: func(r *Request) { // SetBodyXmlString + r.SetBodyXmlString(body) + }, + ContentType: header.XmlContentType, + }, + { + SetBody: func(r *Request) { // SetBodyXmlBytes + r.SetBodyXmlBytes([]byte(body)) + }, + ContentType: header.XmlContentType, + }, + } + for _, tc := range testCases { + r := c.R() + tc.SetBody(r) + var e Echo + resp, err := r.SetSuccessResult(&e).Post("/echo") + assertSuccess(t, resp, err) + tests.AssertEqual(t, tc.ContentType, e.Header.Get(header.ContentType)) + tests.AssertEqual(t, body, e.Body) + } +} + +func TestCookie(t *testing.T) { + headers := make(http.Header) + resp, err := tc().R().SetCookies( + &http.Cookie{ + Name: "cookie1", + Value: "value1", + }, + &http.Cookie{ + Name: "cookie2", + Value: "value2", + }, + ).SetSuccessResult(&headers).Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "cookie1=value1; cookie2=value2", headers.Get("Cookie")) +} + +func TestSetBasicAuth(t *testing.T) { + headers := make(http.Header) + resp, err := tc().R(). + SetBasicAuth("imroc", "123456"). + SetSuccessResult(&headers). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "Basic aW1yb2M6MTIzNDU2", headers.Get("Authorization")) +} + +func TestSetBearerAuthToken(t *testing.T) { + token := "NGU1ZWYwZDJhNmZhZmJhODhmMjQ3ZDc4" + headers := make(http.Header) + resp, err := tc().R(). + SetBearerAuthToken(token). + SetSuccessResult(&headers). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "Bearer "+token, headers.Get("Authorization")) +} + +func TestHeader(t *testing.T) { + testWithAllTransport(t, testHeader) +} + +func testHeader(t *testing.T, c *Client) { + // Set User-Agent + customUserAgent := "My Custom User Agent" + resp, err := c.R().SetHeader(header.UserAgent, customUserAgent).Get("/user-agent") + assertSuccess(t, resp, err) + tests.AssertEqual(t, customUserAgent, resp.String()) + + // Set custom header + headers := make(http.Header) + resp, err = c.R(). + SetHeader("header1", "value1"). 
+ SetHeaders(map[string]string{ + "header2": "value2", + "header3": "value3", + }).SetSuccessResult(&headers). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "value1", headers.Get("header1")) + tests.AssertEqual(t, "value2", headers.Get("header2")) + tests.AssertEqual(t, "value3", headers.Get("header3")) +} + +func TestSetHeaderNonCanonical(t *testing.T) { + // set headers + key := "spring.cloud.function.Routing-expression" + c := tc().EnableForceHTTP1() + resp, err := c.R().EnableDumpWithoutResponse(). + SetHeadersNonCanonical(map[string]string{ + key: "test", + }).Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, strings.Contains(resp.Dump(), key)) + + resp, err = c.R(). + EnableDumpWithoutResponse(). + SetHeaderNonCanonical(key, "test"). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, strings.Contains(resp.Dump(), key)) + + c.SetCommonHeaderNonCanonical(key, "test") + resp, err = c.R(). + EnableDumpWithoutResponse(). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, strings.Contains(resp.Dump(), key)) +} + +func TestQueryParam(t *testing.T) { + testWithAllTransport(t, testQueryParam) +} + +func testQueryParam(t *testing.T, c *Client) { + // Set query param at client level, should be overwritten at request level + c.SetCommonQueryParam("key1", "client"). + SetCommonQueryParams(map[string]string{ + "key2": "client", + "key3": "client", + }). + SetCommonQueryString("key4=client&key5=client"). + AddCommonQueryParam("key5", "extra") + + // SetQueryParam + resp, err := c.R(). + SetQueryParam("key1", "value1"). + SetQueryParam("key2", "value2"). + SetQueryParam("key3", "value3"). + Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "key1=value1&key2=value2&key3=value3&key4=client&key5=client&key5=extra", resp.String()) + + // SetQueryString + resp, err = c.R(). + SetQueryString("key1=value1&key2=value2&key3=value3"). + Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "key1=value1&key2=value2&key3=value3&key4=client&key5=client&key5=extra", resp.String()) + + // SetQueryParams + resp, err = c.R(). + SetQueryParams(map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + }). + Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "key1=value1&key2=value2&key3=value3&key4=client&key5=client&key5=extra", resp.String()) + + // SetQueryParam & SetQueryParams & SetQueryString + resp, err = c.R(). + SetQueryParam("key1", "value1"). + SetQueryParams(map[string]string{ + "key2": "value2", + "key3": "value3", + }). + SetQueryString("key4=value4&key5=value5"). + Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "key1=value1&key2=value2&key3=value3&key4=value4&key5=value5", resp.String()) + + // Add same param without override + resp, err = c.R(). + SetQueryParam("key1", "value1"). + SetQueryParams(map[string]string{ + "key2": "value2", + "key3": "value3", + }). + SetQueryString("key4=value4&key5=value5"). + AddQueryParam("key1", "value11"). + AddQueryParam("key2", "value22"). + AddQueryParam("key4", "value44"). + AddQueryParams("key6", "value6", "value66"). 
+ Get("/query-parameter") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "key1=value1&key1=value11&key2=value2&key2=value22&key3=value3&key4=value4&key4=value44&key5=value5&key6=value6&key6=value66", resp.String()) +} + +func TestPathParam(t *testing.T) { + testPathParam(t, tc()) + testPathParam(t, tc().EnableForceHTTP1()) +} + +func testPathParam(t *testing.T, c *Client) { + username := "imroc" + resp, err := c.R(). + SetPathParam("username", username). + Get("/user/{username}/profile") + assertSuccess(t, resp, err) + tests.AssertEqual(t, fmt.Sprintf("%s's profile", username), resp.String()) +} + +func TestSuccess(t *testing.T) { + testWithAllTransport(t, testSuccess) +} + +func testSuccess(t *testing.T, c *Client) { + var userInfo UserInfo + resp, err := c.R(). + SetQueryParam("username", "imroc"). + SetSuccessResult(&userInfo). + Get("/search") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "roc@imroc.cc", userInfo.Email) + + userInfo = UserInfo{} + resp, err = c.R(). + SetQueryParam("username", "imroc"). + SetQueryParam("type", "xml"). // auto unmarshal to xml + SetSuccessResult(&userInfo).EnableDump(). + Get("/search") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "roc@imroc.cc", userInfo.Email) +} + +func TestError(t *testing.T) { + testWithAllTransport(t, testError) +} + +func testError(t *testing.T, c *Client) { + var errMsg ErrorMessage + resp, err := c.R(). + SetQueryParam("username", ""). + SetErrorResult(&errMsg). + Get("/search") + assertIsError(t, resp, err) + tests.AssertEqual(t, 10000, errMsg.ErrorCode) + + errMsg = ErrorMessage{} + resp, err = c.R(). + SetQueryParam("username", "test"). + SetErrorResult(&errMsg). + Get("/search") + assertIsError(t, resp, err) + tests.AssertEqual(t, 10001, errMsg.ErrorCode) + + errMsg = ErrorMessage{} + resp, err = c.R(). + SetQueryParam("username", "test"). + SetQueryParam("type", "xml"). // auto unmarshal to xml + SetErrorResult(&errMsg). + Get("/search") + assertIsError(t, resp, err) + tests.AssertEqual(t, 10001, errMsg.ErrorCode) + + c.SetCommonErrorResult(&errMsg) + resp, err = c.R(). + SetQueryParam("username", ""). + Get("/search") + assertIsError(t, resp, err) + em, ok := resp.Error().(*ErrorMessage) + tests.AssertEqual(t, true, ok) + tests.AssertEqual(t, 10000, em.ErrorCode) +} + +func TestForm(t *testing.T) { + testWithAllTransport(t, testForm) +} + +func testForm(t *testing.T, c *Client) { + var userInfo UserInfo + resp, err := c.R(). + SetFormData(map[string]string{ + "username": "imroc", + "type": "xml", + }). + SetSuccessResult(&userInfo). + Post("/search") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "roc@imroc.cc", userInfo.Email) + + v := make(url.Values) + v.Add("username", "imroc") + v.Add("type", "xml") + resp, err = c.R(). + SetFormDataFromValues(v). + SetSuccessResult(&userInfo). 
+ Post("/search") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "roc@imroc.cc", userInfo.Email) +} + +func TestHostHeaderOverride(t *testing.T) { + testWithAllTransport(t, testHostHeaderOverride) +} + +func testHostHeaderOverride(t *testing.T, c *Client) { + resp, err := c.R().SetHeader("Host", "testhostname").Get("/host-header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "testhostname", resp.String()) +} + +func assertTraceInfo(t *testing.T, resp *Response, enable bool) { + ti := resp.TraceInfo() + tests.AssertEqual(t, true, resp.TotalTime() > 0) + if !enable { + tests.AssertEqual(t, false, ti.TotalTime > 0) + tests.AssertIsNil(t, ti.RemoteAddr) + tests.AssertContains(t, ti.String(), "not enabled", true) + tests.AssertContains(t, ti.Blame(), "not enabled", true) + return + } + + tests.AssertContains(t, ti.String(), "not enabled", false) + tests.AssertContains(t, ti.Blame(), "not enabled", false) + tests.AssertEqual(t, true, ti.TotalTime > 0) + tests.AssertEqual(t, true, ti.ConnectTime > 0) + tests.AssertEqual(t, true, ti.FirstResponseTime > 0) + tests.AssertEqual(t, true, ti.ResponseTime > 0) + tests.AssertNotNil(t, ti.RemoteAddr) + if ti.IsConnReused { + tests.AssertEqual(t, true, ti.TCPConnectTime == 0) + tests.AssertEqual(t, true, ti.TLSHandshakeTime == 0) + } else { + tests.AssertEqual(t, true, ti.TCPConnectTime > 0) + tests.AssertEqual(t, true, ti.TLSHandshakeTime > 0) + } +} + +func assertEnableTraceInfo(t *testing.T, resp *Response) { + assertTraceInfo(t, resp, true) +} + +func assertDisableTraceInfo(t *testing.T, resp *Response) { + assertTraceInfo(t, resp, false) +} + +func TestTraceInfo(t *testing.T) { + testWithAllTransport(t, testTraceInfo) +} + +func testTraceInfo(t *testing.T, c *Client) { + // enable trace at client level + c.EnableTraceAll() + resp, err := c.R().Get("/") + assertSuccess(t, resp, err) + assertEnableTraceInfo(t, resp) + + // disable trace at client level + c.DisableTraceAll() + resp, err = c.R().Get("/") + assertSuccess(t, resp, err) + assertDisableTraceInfo(t, resp) + + // enable trace at request level + resp, err = c.R().EnableTrace().Get("/") + assertSuccess(t, resp, err) + assertEnableTraceInfo(t, resp) +} + +func TestTraceOnTimeout(t *testing.T) { + testWithAllTransport(t, func(t *testing.T, c *Client) { + c.EnableTraceAll().SetTimeout(100 * time.Millisecond) + + resp, err := c.R().Get("http://req-nowhere.local") + tests.AssertNotNil(t, err) + tests.AssertNotNil(t, resp) + + ti := resp.TraceInfo() + tests.AssertEqual(t, true, ti.DNSLookupTime >= 0) + tests.AssertEqual(t, true, ti.ConnectTime == 0) + tests.AssertEqual(t, true, ti.TLSHandshakeTime == 0) + tests.AssertEqual(t, true, ti.TCPConnectTime == 0) + tests.AssertEqual(t, true, ti.FirstResponseTime == 0) + tests.AssertEqual(t, true, ti.ResponseTime == 0) + tests.AssertEqual(t, true, ti.TotalTime > 0) + tests.AssertEqual(t, true, ti.TotalTime == resp.TotalTime()) + }) +} + +func TestAutoDetectRequestContentType(t *testing.T) { + c := tc() + resp, err := c.R().SetBody(getTestFileContent(t, "sample-image.png")).Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "image/png", resp.String()) + + resp, err = c.R().SetBodyJsonString(`{"msg": "test"}`).Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, header.JsonContentType, resp.String()) + + resp, err = c.R().SetContentType(header.XmlContentType).SetBody(`{"msg": "test"}`).Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, header.XmlContentType, resp.String()) + + 
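+ // a body containing HTML markup should be auto-detected as text/html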
resp, err = c.R().SetBody(`

<html>
<body>hello</body>
</html>
`).Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "text/html; charset=utf-8", resp.String()) + + resp, err = c.R().SetBody(`hello world`).Post("/content-type") + assertSuccess(t, resp, err) + tests.AssertEqual(t, header.PlainTextContentType, resp.String()) +} + +func TestSetFileUploadCheck(t *testing.T) { + c := tc() + resp, err := c.R().SetFileUpload(FileUpload{}).Post("/multipart") + tests.AssertErrorContains(t, err, "missing param name") + tests.AssertErrorContains(t, err, "missing filename") + tests.AssertErrorContains(t, err, "missing file content") + tests.AssertEqual(t, 0, len(resp.Request.uploadFiles)) +} + +func TestUploadMultipart(t *testing.T) { + m := make(map[string]interface{}) + resp, err := tc().R(). + SetFile("file", tests.GetTestFilePath("sample-image.png")). + SetFiles(map[string]string{"file": tests.GetTestFilePath("sample-file.txt")}). + SetFormData(map[string]string{ + "param1": "value1", + "param2": "value2", + }). + SetSuccessResult(&m). + Post("/multipart") + assertSuccess(t, resp, err) + tests.AssertContains(t, resp.String(), "sample-image.png", true) + tests.AssertContains(t, resp.String(), "sample-file.txt", true) + tests.AssertContains(t, resp.String(), "value1", true) + tests.AssertContains(t, resp.String(), "value2", true) +} + +func TestFixPragmaCache(t *testing.T) { + resp, err := tc().EnableForceHTTP1().R().Get("/pragma") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "no-cache", resp.Header.Get("Cache-Control")) +} + +func TestSetFileBytes(t *testing.T) { + resp := uploadTextFile(t, func(r *Request) { + r.SetFileBytes("file", "file.txt", []byte("test")) + }) + tests.AssertEqual(t, "test", resp.String()) +} + +func TestSetFileReader(t *testing.T) { + buff := bytes.NewBufferString("test") + resp := uploadTextFile(t, func(r *Request) { + r.SetFileReader("file", "file.txt", buff) + }) + tests.AssertEqual(t, "test", resp.String()) + + buff = bytes.NewBufferString("test") + resp = uploadTextFile(t, func(r *Request) { + r.SetFileReader("file", "file.txt", io.NopCloser(buff)) + }) + tests.AssertEqual(t, "test", resp.String()) +} + +func TestSetFileWithRetry(t *testing.T) { + resp, err := tc().R(). + SetRetryCount(3). + SetRetryCondition(func(resp *Response, err error) bool { + return err != nil || resp.StatusCode > 499 + }). + SetRetryHook(func(resp *Response, err error) { + resp.Request.SetQueryParam("attempt", strconv.Itoa(resp.Request.RetryAttempt)) + }). + SetFile("file", tests.GetTestFilePath("sample-file.txt")). + SetQueryParam("attempt", "0"). 
+ Post("/file-text") + assertSuccess(t, resp, err) + tests.AssertEqual(t, 2, resp.Request.RetryAttempt) +} + +func TestSetFile(t *testing.T) { + filename := "sample-file.txt" + resp := uploadTextFile(t, func(r *Request) { + r.SetFile("file", tests.GetTestFilePath(filename)) + }) + tests.AssertEqual(t, getTestFileContent(t, filename), resp.Bytes()) + + resp, err := tc().SetLogger(nil).R().SetFile("file", "file-not-exists.txt").Post("/file-text") + tests.AssertErrorContains(t, err, "no such file") +} + +func TestSetFiles(t *testing.T) { + filename := "sample-file.txt" + resp := uploadTextFile(t, func(r *Request) { + r.SetFiles(map[string]string{ + "file": tests.GetTestFilePath(filename), + }) + }) + tests.AssertEqual(t, getTestFileContent(t, filename), resp.Bytes()) +} + +func uploadTextFile(t *testing.T, setReq func(r *Request)) *Response { + r := tc().R() + setReq(r) + resp, err := r.Post("/file-text") + assertSuccess(t, resp, err) + return resp +} + +type SlowReader struct { + io.ReadCloser +} + +func (r *SlowReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + return r.ReadCloser.Read(p) +} + +func TestUploadCallback(t *testing.T) { + r := tc().R() + file := "transport.go" + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + r.SetFile("file", file) + r.uploadFiles[0].FileSize = fileInfo.Size() + content, err := r.uploadFiles[0].GetFileContent() + if err != nil { + t.Fatal(err) + } + r.uploadFiles[0].GetFileContent = func() (io.ReadCloser, error) { + return &SlowReader{content}, nil + } + n := 0 + r.SetUploadCallback(func(info UploadInfo) { + n++ + }) + resp, err := r.Post("/raw-upload") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, n > 1) +} + +func TestDownloadCallback(t *testing.T) { + n := 0 + resp, err := tc().R(). + SetOutput(io.Discard). + SetDownloadCallback(func(info DownloadInfo) { + n++ + }).Get("/download") + assertSuccess(t, resp, err) + tests.AssertEqual(t, true, n > 0) +} + +func TestRequestDisableAutoReadResponse(t *testing.T) { + testWithAllTransport(t, func(t *testing.T, c *Client) { + resp, err := c.R().DisableAutoReadResponse().Get("/") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "", resp.String()) + result, err := resp.ToString() + tests.AssertNoError(t, err) + tests.AssertEqual(t, "TestGet: text response", result) + + resp, err = c.R().DisableAutoReadResponse().Get("/") + assertSuccess(t, resp, err) + _, err = io.ReadAll(resp.Body) + tests.AssertNoError(t, err) + }) +} + +func TestRestoreResponseBody(t *testing.T) { + c := tc() + resp, err := c.R().Get("/") + assertSuccess(t, resp, err) + tests.AssertNoError(t, err) + tests.AssertEqual(t, true, len(resp.Bytes()) > 0) + body, err := io.ReadAll(resp.Body) + tests.AssertNoError(t, err) + tests.AssertEqual(t, true, len(body) > 0) +} diff --git a/request_wrapper.go b/request_wrapper.go new file mode 100644 index 00000000..cdc3f685 --- /dev/null +++ b/request_wrapper.go @@ -0,0 +1,529 @@ +package req + +import ( + "context" + "io" + "net/http" + "net/url" + "time" +) + +// SetURL is a global wrapper methods which delegated +// to the default client, create a request and SetURL for request. +func SetURL(url string) *Request { + return defaultClient.R().SetURL(url) +} + +// SetFormDataFromValues is a global wrapper methods which delegated +// to the default client, create a request and SetFormDataFromValues for request. 
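+// For example (illustrative URL):
+//
+//	v := make(url.Values)
+//	v.Add("username", "imroc")
+//	resp, err := req.SetFormDataFromValues(v).Post("https://httpbin.org/post")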
+func SetFormDataFromValues(data url.Values) *Request { + return defaultClient.R().SetFormDataFromValues(data) +} + +// SetFormData is a global wrapper methods which delegated +// to the default client, create a request and SetFormData for request. +func SetFormData(data map[string]string) *Request { + return defaultClient.R().SetFormData(data) +} + +// SetOrderedFormData is a global wrapper methods which delegated +// to the default client, create a request and SetOrderedFormData for request. +func SetOrderedFormData(kvs ...string) *Request { + return defaultClient.R().SetOrderedFormData(kvs...) +} + +// SetFormDataAnyType is a global wrapper methods which delegated +// to the default client, create a request and SetFormDataAnyType for request. +func SetFormDataAnyType(data map[string]interface{}) *Request { + return defaultClient.R().SetFormDataAnyType(data) +} + +// SetCookies is a global wrapper methods which delegated +// to the default client, create a request and SetCookies for request. +func SetCookies(cookies ...*http.Cookie) *Request { + return defaultClient.R().SetCookies(cookies...) +} + +// SetQueryString is a global wrapper methods which delegated +// to the default client, create a request and SetQueryString for request. +func SetQueryString(query string) *Request { + return defaultClient.R().SetQueryString(query) +} + +// SetFileReader is a global wrapper methods which delegated +// to the default client, create a request and SetFileReader for request. +func SetFileReader(paramName, filePath string, reader io.Reader) *Request { + return defaultClient.R().SetFileReader(paramName, filePath, reader) +} + +// SetFileBytes is a global wrapper methods which delegated +// to the default client, create a request and SetFileBytes for request. +func SetFileBytes(paramName, filename string, content []byte) *Request { + return defaultClient.R().SetFileBytes(paramName, filename, content) +} + +// SetFiles is a global wrapper methods which delegated +// to the default client, create a request and SetFiles for request. +func SetFiles(files map[string]string) *Request { + return defaultClient.R().SetFiles(files) +} + +// SetFile is a global wrapper methods which delegated +// to the default client, create a request and SetFile for request. +func SetFile(paramName, filePath string) *Request { + return defaultClient.R().SetFile(paramName, filePath) +} + +// SetFileUpload is a global wrapper methods which delegated +// to the default client, create a request and SetFileUpload for request. +func SetFileUpload(f ...FileUpload) *Request { + return defaultClient.R().SetFileUpload(f...) +} + +// SetResult is a global wrapper methods which delegated +// to the default client, create a request and SetSuccessResult for request. +// +// Deprecated: Use SetSuccessResult instead. +func SetResult(result interface{}) *Request { + return defaultClient.R().SetSuccessResult(result) +} + +// SetSuccessResult is a global wrapper methods which delegated +// to the default client, create a request and SetSuccessResult for request. +func SetSuccessResult(result interface{}) *Request { + return defaultClient.R().SetSuccessResult(result) +} + +// SetError is a global wrapper methods which delegated +// to the default client, create a request and SetErrorResult for request. +// +// Deprecated: Use SetErrorResult instead. 
+func SetError(error interface{}) *Request { + return defaultClient.R().SetErrorResult(error) +} + +// SetErrorResult is a global wrapper methods which delegated +// to the default client, create a request and SetErrorResult for request. +func SetErrorResult(error interface{}) *Request { + return defaultClient.R().SetErrorResult(error) +} + +// SetBearerAuthToken is a global wrapper methods which delegated +// to the default client, create a request and SetBearerAuthToken for request. +func SetBearerAuthToken(token string) *Request { + return defaultClient.R().SetBearerAuthToken(token) +} + +// SetBasicAuth is a global wrapper methods which delegated +// to the default client, create a request and SetBasicAuth for request. +func SetBasicAuth(username, password string) *Request { + return defaultClient.R().SetBasicAuth(username, password) +} + +// SetDigestAuth is a global wrapper methods which delegated +// to the default client, create a request and SetDigestAuth for request. +func SetDigestAuth(username, password string) *Request { + return defaultClient.R().SetDigestAuth(username, password) +} + +// SetHeaders is a global wrapper methods which delegated +// to the default client, create a request and SetHeaders for request. +func SetHeaders(hdrs map[string]string) *Request { + return defaultClient.R().SetHeaders(hdrs) +} + +// SetHeader is a global wrapper methods which delegated +// to the default client, create a request and SetHeader for request. +func SetHeader(key, value string) *Request { + return defaultClient.R().SetHeader(key, value) +} + +// SetHeaderOrder is a global wrapper methods which delegated +// to the default client, create a request and SetHeaderOrder for request. +func SetHeaderOrder(keys ...string) *Request { + return defaultClient.R().SetHeaderOrder(keys...) +} + +// SetPseudoHeaderOrder is a global wrapper methods which delegated +// to the default client, create a request and SetPseudoHeaderOrder for request. +func SetPseudoHeaderOrder(keys ...string) *Request { + return defaultClient.R().SetPseudoHeaderOrder(keys...) +} + +// SetOutputFile is a global wrapper methods which delegated +// to the default client, create a request and SetOutputFile for request. +func SetOutputFile(file string) *Request { + return defaultClient.R().SetOutputFile(file) +} + +// SetOutput is a global wrapper methods which delegated +// to the default client, create a request and SetOutput for request. +func SetOutput(output io.Writer) *Request { + return defaultClient.R().SetOutput(output) +} + +// SetQueryParams is a global wrapper methods which delegated +// to the default client, create a request and SetQueryParams for request. +func SetQueryParams(params map[string]string) *Request { + return defaultClient.R().SetQueryParams(params) +} + +// SetQueryParamsAnyType is a global wrapper methods which delegated +// to the default client, create a request and SetQueryParamsAnyType for request. +func SetQueryParamsAnyType(params map[string]interface{}) *Request { + return defaultClient.R().SetQueryParamsAnyType(params) +} + +// SetQueryParam is a global wrapper methods which delegated +// to the default client, create a request and SetQueryParam for request. +func SetQueryParam(key, value string) *Request { + return defaultClient.R().SetQueryParam(key, value) +} + +// AddQueryParam is a global wrapper methods which delegated +// to the default client, create a request and AddQueryParam for request. 
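+// For example, sending ?key=value1&key=value2 (illustrative URL):
+//
+//	resp, err := req.AddQueryParam("key", "value1").AddQueryParam("key", "value2").Get("https://httpbin.org/get")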
+func AddQueryParam(key, value string) *Request { + return defaultClient.R().AddQueryParam(key, value) +} + +// AddQueryParams is a global wrapper methods which delegated +// to the default client, create a request and AddQueryParams for request. +func AddQueryParams(key string, values ...string) *Request { + return defaultClient.R().AddQueryParams(key, values...) +} + +// SetPathParams is a global wrapper methods which delegated +// to the default client, create a request and SetPathParams for request. +func SetPathParams(params map[string]string) *Request { + return defaultClient.R().SetPathParams(params) +} + +// SetPathParam is a global wrapper methods which delegated +// to the default client, create a request and SetPathParam for request. +func SetPathParam(key, value string) *Request { + return defaultClient.R().SetPathParam(key, value) +} + +// MustGet is a global wrapper methods which delegated +// to the default client, create a request and MustGet for request. +func MustGet(url string) *Response { + return defaultClient.R().MustGet(url) +} + +// Get is a global wrapper methods which delegated +// to the default client, create a request and Get for request. +func Get(url string) (*Response, error) { + return defaultClient.R().Get(url) +} + +// MustPost is a global wrapper methods which delegated +// to the default client, create a request and Get for request. +func MustPost(url string) *Response { + return defaultClient.R().MustPost(url) +} + +// Post is a global wrapper methods which delegated +// to the default client, create a request and Post for request. +func Post(url string) (*Response, error) { + return defaultClient.R().Post(url) +} + +// MustPut is a global wrapper methods which delegated +// to the default client, create a request and MustPut for request. +func MustPut(url string) *Response { + return defaultClient.R().MustPut(url) +} + +// Put is a global wrapper methods which delegated +// to the default client, create a request and Put for request. +func Put(url string) (*Response, error) { + return defaultClient.R().Put(url) +} + +// MustPatch is a global wrapper methods which delegated +// to the default client, create a request and MustPatch for request. +func MustPatch(url string) *Response { + return defaultClient.R().MustPatch(url) +} + +// Patch is a global wrapper methods which delegated +// to the default client, create a request and Patch for request. +func Patch(url string) (*Response, error) { + return defaultClient.R().Patch(url) +} + +// MustDelete is a global wrapper methods which delegated +// to the default client, create a request and MustDelete for request. +func MustDelete(url string) *Response { + return defaultClient.R().MustDelete(url) +} + +// Delete is a global wrapper methods which delegated +// to the default client, create a request and Delete for request. +func Delete(url string) (*Response, error) { + return defaultClient.R().Delete(url) +} + +// MustOptions is a global wrapper methods which delegated +// to the default client, create a request and MustOptions for request. +func MustOptions(url string) *Response { + return defaultClient.R().MustOptions(url) +} + +// Options is a global wrapper methods which delegated +// to the default client, create a request and Options for request. +func Options(url string) (*Response, error) { + return defaultClient.R().Options(url) +} + +// MustHead is a global wrapper methods which delegated +// to the default client, create a request and MustHead for request. 
+func MustHead(url string) *Response { + return defaultClient.R().MustHead(url) +} + +// Head is a global wrapper methods which delegated +// to the default client, create a request and Head for request. +func Head(url string) (*Response, error) { + return defaultClient.R().Head(url) +} + +// SetBody is a global wrapper methods which delegated +// to the default client, create a request and SetBody for request. +func SetBody(body interface{}) *Request { + return defaultClient.R().SetBody(body) +} + +// SetBodyBytes is a global wrapper methods which delegated +// to the default client, create a request and SetBodyBytes for request. +func SetBodyBytes(body []byte) *Request { + return defaultClient.R().SetBodyBytes(body) +} + +// SetBodyString is a global wrapper methods which delegated +// to the default client, create a request and SetBodyString for request. +func SetBodyString(body string) *Request { + return defaultClient.R().SetBodyString(body) +} + +// SetBodyJsonString is a global wrapper methods which delegated +// to the default client, create a request and SetBodyJsonString for request. +func SetBodyJsonString(body string) *Request { + return defaultClient.R().SetBodyJsonString(body) +} + +// SetBodyJsonBytes is a global wrapper methods which delegated +// to the default client, create a request and SetBodyJsonBytes for request. +func SetBodyJsonBytes(body []byte) *Request { + return defaultClient.R().SetBodyJsonBytes(body) +} + +// SetBodyJsonMarshal is a global wrapper methods which delegated +// to the default client, create a request and SetBodyJsonMarshal for request. +func SetBodyJsonMarshal(v interface{}) *Request { + return defaultClient.R().SetBodyJsonMarshal(v) +} + +// SetBodyXmlString is a global wrapper methods which delegated +// to the default client, create a request and SetBodyXmlString for request. +func SetBodyXmlString(body string) *Request { + return defaultClient.R().SetBodyXmlString(body) +} + +// SetBodyXmlBytes is a global wrapper methods which delegated +// to the default client, create a request and SetBodyXmlBytes for request. +func SetBodyXmlBytes(body []byte) *Request { + return defaultClient.R().SetBodyXmlBytes(body) +} + +// SetBodyXmlMarshal is a global wrapper methods which delegated +// to the default client, create a request and SetBodyXmlMarshal for request. +func SetBodyXmlMarshal(v interface{}) *Request { + return defaultClient.R().SetBodyXmlMarshal(v) +} + +// SetContentType is a global wrapper methods which delegated +// to the default client, create a request and SetContentType for request. +func SetContentType(contentType string) *Request { + return defaultClient.R().SetContentType(contentType) +} + +// SetContext is a global wrapper methods which delegated +// to the default client, create a request and SetContext for request. +func SetContext(ctx context.Context) *Request { + return defaultClient.R().SetContext(ctx) +} + +// DisableTrace is a global wrapper methods which delegated +// to the default client, create a request and DisableTrace for request. +func DisableTrace() *Request { + return defaultClient.R().DisableTrace() +} + +// EnableTrace is a global wrapper methods which delegated +// to the default client, create a request and EnableTrace for request. +func EnableTrace() *Request { + return defaultClient.R().EnableTrace() +} + +// EnableForceChunkedEncoding is a global wrapper methods which delegated +// to the default client, create a request and EnableForceChunkedEncoding for request. 
+func EnableForceChunkedEncoding() *Request { + return defaultClient.R().EnableForceChunkedEncoding() +} + +// DisableForceChunkedEncoding is a global wrapper methods which delegated +// to the default client, create a request and DisableForceChunkedEncoding for request. +func DisableForceChunkedEncoding() *Request { + return defaultClient.R().DisableForceChunkedEncoding() +} + +// EnableForceMultipart is a global wrapper methods which delegated +// to the default client, create a request and EnableForceMultipart for request. +func EnableForceMultipart() *Request { + return defaultClient.R().EnableForceMultipart() +} + +// DisableForceMultipart is a global wrapper methods which delegated +// to the default client, create a request and DisableForceMultipart for request. +func DisableForceMultipart() *Request { + return defaultClient.R().DisableForceMultipart() +} + +// EnableDumpTo is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpTo for request. +func EnableDumpTo(output io.Writer) *Request { + return defaultClient.R().EnableDumpTo(output) +} + +// EnableDumpToFile is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpToFile for request. +func EnableDumpToFile(filename string) *Request { + return defaultClient.R().EnableDumpToFile(filename) +} + +// SetDumpOptions is a global wrapper methods which delegated +// to the default client, create a request and SetDumpOptions for request. +func SetDumpOptions(opt *DumpOptions) *Request { + return defaultClient.R().SetDumpOptions(opt) +} + +// EnableDump is a global wrapper methods which delegated +// to the default client, create a request and EnableDump for request. +func EnableDump() *Request { + return defaultClient.R().EnableDump() +} + +// EnableDumpWithoutBody is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutBody for request. +func EnableDumpWithoutBody() *Request { + return defaultClient.R().EnableDumpWithoutBody() +} + +// EnableDumpWithoutHeader is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutHeader for request. +func EnableDumpWithoutHeader() *Request { + return defaultClient.R().EnableDumpWithoutHeader() +} + +// EnableDumpWithoutResponse is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutResponse for request. +func EnableDumpWithoutResponse() *Request { + return defaultClient.R().EnableDumpWithoutResponse() +} + +// EnableDumpWithoutRequest is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutRequest for request. +func EnableDumpWithoutRequest() *Request { + return defaultClient.R().EnableDumpWithoutRequest() +} + +// EnableDumpWithoutRequestBody is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutRequestBody for request. +func EnableDumpWithoutRequestBody() *Request { + return defaultClient.R().EnableDumpWithoutRequestBody() +} + +// EnableDumpWithoutResponseBody is a global wrapper methods which delegated +// to the default client, create a request and EnableDumpWithoutResponseBody for request. +func EnableDumpWithoutResponseBody() *Request { + return defaultClient.R().EnableDumpWithoutResponseBody() +} + +// SetRetryCount is a global wrapper methods which delegated +// to the default client, create a request and SetRetryCount for request. 
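A sketch of the dump wrappers in use: EnableDump records the full exchange for the request, and Response.Dump (added later in this diff) returns the captured text. The target URL is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/imroc/req/v3"
)

func main() {
    // Dump both request and response; EnableDumpWithoutBody and the other
    // EnableDumpWithout* variants trim what gets recorded.
    resp, err := req.EnableDump().Get("https://httpbin.org/get")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp.Dump())
}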
+func SetRetryCount(count int) *Request { + return defaultClient.R().SetRetryCount(count) +} + +// SetRetryInterval is a global wrapper methods which delegated +// to the default client, create a request and SetRetryInterval for request. +func SetRetryInterval(getRetryIntervalFunc GetRetryIntervalFunc) *Request { + return defaultClient.R().SetRetryInterval(getRetryIntervalFunc) +} + +// SetRetryFixedInterval is a global wrapper methods which delegated +// to the default client, create a request and SetRetryFixedInterval for request. +func SetRetryFixedInterval(interval time.Duration) *Request { + return defaultClient.R().SetRetryFixedInterval(interval) +} + +// SetRetryBackoffInterval is a global wrapper methods which delegated +// to the default client, create a request and SetRetryBackoffInterval for request. +func SetRetryBackoffInterval(min, max time.Duration) *Request { + return defaultClient.R().SetRetryBackoffInterval(min, max) +} + +// SetRetryHook is a global wrapper methods which delegated +// to the default client, create a request and SetRetryHook for request. +func SetRetryHook(hook RetryHookFunc) *Request { + return defaultClient.R().SetRetryHook(hook) +} + +// AddRetryHook is a global wrapper methods which delegated +// to the default client, create a request and AddRetryHook for request. +func AddRetryHook(hook RetryHookFunc) *Request { + return defaultClient.R().AddRetryHook(hook) +} + +// SetRetryCondition is a global wrapper methods which delegated +// to the default client, create a request and SetRetryCondition for request. +func SetRetryCondition(condition RetryConditionFunc) *Request { + return defaultClient.R().SetRetryCondition(condition) +} + +// AddRetryCondition is a global wrapper methods which delegated +// to the default client, create a request and AddRetryCondition for request. +func AddRetryCondition(condition RetryConditionFunc) *Request { + return defaultClient.R().AddRetryCondition(condition) +} + +// SetUploadCallback is a global wrapper methods which delegated +// to the default client, create a request and SetUploadCallback for request. +func SetUploadCallback(callback UploadCallback) *Request { + return defaultClient.R().SetUploadCallback(callback) +} + +// SetUploadCallbackWithInterval is a global wrapper methods which delegated +// to the default client, create a request and SetUploadCallbackWithInterval for request. +func SetUploadCallbackWithInterval(callback UploadCallback, minInterval time.Duration) *Request { + return defaultClient.R().SetUploadCallbackWithInterval(callback, minInterval) +} + +// SetDownloadCallback is a global wrapper methods which delegated +// to the default client, create a request and SetDownloadCallback for request. +func SetDownloadCallback(callback DownloadCallback) *Request { + return defaultClient.R().SetDownloadCallback(callback) +} + +// SetDownloadCallbackWithInterval is a global wrapper methods which delegated +// to the default client, create a request and SetDownloadCallbackWithInterval for request. +func SetDownloadCallbackWithInterval(callback DownloadCallback, minInterval time.Duration) *Request { + return defaultClient.R().SetDownloadCallbackWithInterval(callback, minInterval) +} + +// EnableCloseConnection is a global wrapper methods which delegated +// to the default client, create a request and EnableCloseConnection for request. 
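A sketch combining the retry wrappers above. SetRetryBackoffInterval applies the capped, jittered exponential backoff implemented by backoffInterval in retry.go later in this diff; the URL and the status-code condition are illustrative.

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/imroc/req/v3"
)

func main() {
    resp, err := req.SetRetryCount(3).
        // Sleep roughly base*2^attempt, capped at 2s, with jitter.
        SetRetryBackoffInterval(100*time.Millisecond, 2*time.Second).
        AddRetryCondition(func(resp *req.Response, err error) bool {
            return err != nil || resp.StatusCode == http.StatusTooManyRequests
        }).
        AddRetryHook(func(resp *req.Response, err error) {
            log.Println("retrying:", err)
        }).
        Get("https://api.example.com/flaky")
    if err != nil {
        log.Fatal(err)
    }
    log.Println(resp.Request.RetryAttempt, "retries,", resp.GetStatus())
}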
+func EnableCloseConnection() *Request { + return defaultClient.R().EnableCloseConnection() +} diff --git a/resp.go b/resp.go deleted file mode 100644 index 03ed679e..00000000 --- a/resp.go +++ /dev/null @@ -1,205 +0,0 @@ -package req - -import ( - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "time" -) - -// Resp represents a request with it's response -type Resp struct { - r *Req - req *http.Request - resp *http.Response - client *http.Client - *multipartHelper - reqBody []byte - respBody []byte - downloadProgress DownloadProgress - err error // delayed error -} - -// Request returns *http.Request -func (r *Resp) Request() *http.Request { - return r.req -} - -// Response returns *http.Response -func (r *Resp) Response() *http.Response { - return r.resp -} - -// Bytes returns response body as []byte -func (r *Resp) Bytes() []byte { - data, _ := r.ToBytes() - return data -} - -// ToBytes returns response body as []byte, -// return error if error happend when reading -// the response body -func (r *Resp) ToBytes() ([]byte, error) { - if r.err != nil { - return nil, r.err - } - if r.respBody != nil { - return r.respBody, nil - } - defer r.resp.Body.Close() - respBody, err := ioutil.ReadAll(r.resp.Body) - if err != nil { - r.err = err - return nil, err - } - r.respBody = respBody - return r.respBody, nil -} - -// String returns response body as string -func (r *Resp) String() string { - data, _ := r.ToBytes() - return string(data) -} - -// ToString returns response body as string, -// return error if error happend when reading -// the response body -func (r *Resp) ToString() (string, error) { - data, err := r.ToBytes() - return string(data), err -} - -// ToJSON convert json response body to struct or map -func (r *Resp) ToJSON(v interface{}) error { - data, err := r.ToBytes() - if err != nil { - return err - } - return json.Unmarshal(data, v) -} - -// ToXML convert xml response body to struct or map -func (r *Resp) ToXML(v interface{}) error { - data, err := r.ToBytes() - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -// ToFile download the response body to file with optional download callback -func (r *Resp) ToFile(name string) error { - //TODO set name to the suffix of url path if name == "" - file, err := os.Create(name) - if err != nil { - return err - } - defer file.Close() - - if r.respBody != nil { - _, err = file.Write(r.respBody) - return err - } - - if r.downloadProgress != nil && r.resp.ContentLength > 0 { - return r.download(file) - } - - defer r.resp.Body.Close() - _, err = io.Copy(file, r.resp.Body) - return err -} - -func (r *Resp) download(file *os.File) error { - p := make([]byte, 1024) - b := r.resp.Body - defer b.Close() - total := r.resp.ContentLength - var current int64 - var lastTime time.Time - for { - l, err := b.Read(p) - if l > 0 { - _, _err := file.Write(p[:l]) - if _err != nil { - return _err - } - current += int64(l) - if now := time.Now(); now.Sub(lastTime) > 200*time.Millisecond { - lastTime = now - r.downloadProgress(current, total) - } - } - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } -} - -var regNewline = regexp.MustCompile(`\n|\r`) - -func (r *Resp) autoFormat(s fmt.State) { - req := r.req - fmt.Fprint(s, req.Method, " ", req.URL.String()) - - // test if it is should be outputed pretty - var pretty bool - var parts []string - addPart := func(part string) { - if part == "" { - return - } - parts = append(parts, part) - if !pretty && 
regNewline.MatchString(part) { - pretty = true - } - } - if r.r.flag&LreqBody != 0 { // request body - addPart(string(r.reqBody)) - } - if r.r.flag&LrespBody != 0 { // response body - addPart(r.String()) - } - - for _, part := range parts { - if pretty { - fmt.Fprint(s, "\n") - } - fmt.Fprint(s, " ", part) - } -} - -func (r *Resp) miniFormat(s fmt.State) { - req := r.req - fmt.Fprint(s, req.Method, " ", req.URL.String()) - if r.r.flag&LreqBody != 0 && len(r.reqBody) > 0 { // request body - str := regNewline.ReplaceAllString(string(r.reqBody), " ") - fmt.Fprint(s, " ", str) - } - if r.r.flag&LrespBody != 0 && r.String() != "" { // response body - str := regNewline.ReplaceAllString(r.String(), " ") - fmt.Fprint(s, " ", str) - } -} - -func (r *Resp) Format(s fmt.State, verb rune) { - if r == nil || r.req == nil { - return - } - if s.Flag('+') { // include header and format pretty. - fmt.Fprint(s, r.Dump()) - } else if s.Flag('-') { // keep all informations in one line. - r.miniFormat(s) - } else { // auto - r.autoFormat(s) - } -} diff --git a/resp_test.go b/resp_test.go deleted file mode 100644 index 6e881a3b..00000000 --- a/resp_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package req - -import ( - "encoding/json" - "encoding/xml" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestToJSON(t *testing.T) { - type Result struct { - Code int `json:"code"` - Msg string `json:"msg"` - } - r1 := Result{ - Code: 1, - Msg: "ok", - } - handler := func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(&r1) - w.Write(data) - } - ts := httptest.NewServer(http.HandlerFunc(handler)) - r, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - var r2 Result - err = r.ToJSON(&r2) - if err != nil { - t.Fatal(err) - } - if r1 != r2 { - t.Errorf("json response body = %+v; want = %+v", r2, r1) - } -} - -func TestToXML(t *testing.T) { - type Result struct { - XMLName xml.Name - Code int `xml:"code"` - Msg string `xml:"msg"` - } - r1 := Result{ - XMLName: xml.Name{Local: "result"}, - Code: 1, - Msg: "ok", - } - handler := func(w http.ResponseWriter, r *http.Request) { - data, _ := xml.Marshal(&r1) - w.Write(data) - } - ts := httptest.NewServer(http.HandlerFunc(handler)) - r, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - var r2 Result - err = r.ToXML(&r2) - if err != nil { - t.Fatal(err) - } - if r1 != r2 { - t.Errorf("xml response body = %+v; want = %+v", r2, r1) - } -} - -func TestFormat(t *testing.T) { - SetFlags(LstdFlags | Lcost) - reqHeader := "Request-Header" - respHeader := "Response-Header" - reqBody := "request body" - respBody1 := "response body 1" - respBody2 := "response body 2" - respBody := fmt.Sprintf("%s\n%s", respBody1, respBody2) - handler := func(w http.ResponseWriter, r *http.Request) { - w.Header().Set(respHeader, "req") - w.Write([]byte(respBody)) - } - ts := httptest.NewServer(http.HandlerFunc(handler)) - - // %v - r, err := Post(ts.URL, reqBody, Header{reqHeader: "hello"}) - if err != nil { - t.Fatal(err) - } - str := fmt.Sprintf("%v", r) - for _, keyword := range []string{ts.URL, reqBody, respBody} { - if !strings.Contains(str, keyword) { - t.Errorf("format %%v output lack of part, want: %s", keyword) - } - } - - // %-v - str = fmt.Sprintf("%-v", r) - for _, keyword := range []string{ts.URL, respBody1 + " " + respBody2} { - if !strings.Contains(str, keyword) { - t.Errorf("format %%-v output lack of part, want: %s", keyword) - } - } - - // %+v - str = fmt.Sprintf("%+v", r) - for _, keyword := range []string{reqBody, respBody, 
reqHeader, respHeader} {
-		if !strings.Contains(str, keyword) {
-			t.Errorf("format %%+v output lack of part, want: %s", keyword)
-		}
-	}
-}
-
-func TestBytesAndString(t *testing.T) {
-	respBody := "response body"
-	handler := func(w http.ResponseWriter, r *http.Request) {
-		w.Write([]byte(respBody))
-	}
-	ts := httptest.NewServer(http.HandlerFunc(handler))
-	r, err := Get(ts.URL)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(r.Bytes()) != respBody {
-		t.Errorf("response body = %s; want = %s", r.Bytes(), respBody)
-	}
-	if r.String() != respBody {
-		t.Errorf("response body = %s; want = %s", r.String(), respBody)
-	}
-}
diff --git a/response.go b/response.go
new file mode 100644
index 00000000..a8e99025
--- /dev/null
+++ b/response.go
@@ -0,0 +1,303 @@
+package req
+
+import (
+    "io"
+    "net/http"
+    "strings"
+    "time"
+
+    "github.com/imroc/req/v3/internal/header"
+    "github.com/imroc/req/v3/internal/util"
+)
+
+// Response is the http response.
+type Response struct {
+    // The underlying http.Response is embedded into Response.
+    *http.Response
+    // Err is the underlying error, not nil if some error occurs.
+    // Usually used in the ResponseMiddleware, you can skip logic in
+    // ResponseMiddleware that doesn't need to be executed when err occurs.
+    Err error
+    // Request is the Response's related Request.
+    Request    *Request
+    body       []byte
+    receivedAt time.Time
+    error      interface{}
+    result     interface{}
+}
+
+// IsSuccess method returns true if no error occurs and HTTP status `code >= 200 and <= 299`
+// by default, you can also use Request.SetResultStateCheckFunc to customize the result
+// state check logic.
+//
+// Deprecated: Use IsSuccessState instead.
+func (r *Response) IsSuccess() bool {
+    return r.IsSuccessState()
+}
+
+// IsSuccessState method returns true if no error occurs and HTTP status `code >= 200 and <= 299`
+// by default, you can also use Request.SetResultStateCheckFunc to customize the result state
+// check logic.
+func (r *Response) IsSuccessState() bool {
+    if r.Response == nil {
+        return false
+    }
+    return r.ResultState() == SuccessState
+}
+
+// IsError method returns true if no error occurs and HTTP status `code >= 400`
+// by default, you can also use Request.SetResultStateCheckFunc to customize the result
+// state check logic.
+//
+// Deprecated: Use IsErrorState instead.
+func (r *Response) IsError() bool {
+    return r.IsErrorState()
+}
+
+// IsErrorState method returns true if no error occurs and HTTP status `code >= 400`
+// by default, you can also use Request.SetResultStateCheckFunc to customize the result
+// state check logic.
+func (r *Response) IsErrorState() bool {
+    if r.Response == nil {
+        return false
+    }
+    return r.ResultState() == ErrorState
+}
+
+// GetContentType returns the `Content-Type` header value.
+func (r *Response) GetContentType() string {
+    if r.Response == nil {
+        return ""
+    }
+    return r.Header.Get(header.ContentType)
+}
+
+// ResultState returns the result state.
+// By default, it returns SuccessState if HTTP status `code >= 200 and <= 299`, and returns
+// ErrorState if HTTP status `code >= 400`, otherwise returns UnknownState.
+// You can also use Request.SetResultStateCheckFunc or Client.SetResultStateCheckFunc
+// to customize the result state check logic.
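The result-state helpers pair with Request.SetSuccessResult and Request.SetErrorResult mentioned in the comments above; the sketch below assumes those setters are chainable like the other Request methods, and uses made-up payload types and URL.

package main

import (
    "fmt"
    "log"

    "github.com/imroc/req/v3"
)

type User struct {
    Name string `json:"name"`
}

type APIError struct {
    Message string `json:"message"`
}

func main() {
    var user User
    var apiErr APIError
    resp, err := req.EnableTrace(). // package-level wrapper from req.go above
        SetSuccessResult(&user).    // filled when ResultState() == SuccessState
        SetErrorResult(&apiErr).    // filled when ResultState() == ErrorState
        Get("https://api.example.com/users/imroc")
    if err != nil {
        log.Fatal(err)
    }
    switch {
    case resp.IsSuccessState():
        fmt.Println("hello,", user.Name, "- took", resp.TotalTime())
    case resp.IsErrorState():
        fmt.Println("API error:", apiErr.Message)
    default:
        fmt.Println("unknown result state:", resp.GetStatus())
    }
}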
+func (r *Response) ResultState() ResultState { + if r.Response == nil { + return UnknownState + } + var resultStateCheckFunc func(resp *Response) ResultState + if r.Request.client.resultStateCheckFunc != nil { + resultStateCheckFunc = r.Request.client.resultStateCheckFunc + } else { + resultStateCheckFunc = defaultResultStateChecker + } + return resultStateCheckFunc(r) +} + +// Result returns the automatically unmarshalled object if Request.SetSuccessResult +// is called and ResultState returns SuccessState. +// Otherwise, return nil. +// +// Deprecated: Use SuccessResult instead. +func (r *Response) Result() interface{} { + return r.SuccessResult() +} + +// SuccessResult returns the automatically unmarshalled object if Request.SetSuccessResult +// is called and ResultState returns SuccessState. +// Otherwise, return nil. +func (r *Response) SuccessResult() interface{} { + return r.result +} + +// Error returns the automatically unmarshalled object when Request.SetErrorResult +// or Client.SetCommonErrorResult is called, and ResultState returns ErrorState. +// Otherwise, return nil. +// +// Deprecated: Use ErrorResult instead. +func (r *Response) Error() interface{} { + return r.error +} + +// ErrorResult returns the automatically unmarshalled object when Request.SetErrorResult +// or Client.SetCommonErrorResult is called, and ResultState returns ErrorState. +// Otherwise, return nil. +func (r *Response) ErrorResult() interface{} { + return r.error +} + +// TraceInfo returns the TraceInfo from Request. +func (r *Response) TraceInfo() TraceInfo { + return r.Request.TraceInfo() +} + +// TotalTime returns the total time of the request, from request we sent to response we received. +func (r *Response) TotalTime() time.Duration { + if r.Request.trace != nil { + return r.Request.TraceInfo().TotalTime + } + if !r.receivedAt.IsZero() { + return r.receivedAt.Sub(r.Request.StartTime) + } + return r.Request.responseReturnTime.Sub(r.Request.StartTime) +} + +// ReceivedAt returns the timestamp that response we received. +func (r *Response) ReceivedAt() time.Time { + return r.receivedAt +} + +func (r *Response) setReceivedAt() { + r.receivedAt = time.Now() + if r.Request.trace != nil { + r.Request.trace.endTime = r.receivedAt + } +} + +// UnmarshalJson unmarshalls JSON response body into the specified object. +func (r *Response) UnmarshalJson(v interface{}) error { + if r.Err != nil { + return r.Err + } + b, err := r.ToBytes() + if err != nil { + return err + } + return r.Request.client.jsonUnmarshal(b, v) +} + +// UnmarshalXml unmarshalls XML response body into the specified object. +func (r *Response) UnmarshalXml(v interface{}) error { + if r.Err != nil { + return r.Err + } + b, err := r.ToBytes() + if err != nil { + return err + } + return r.Request.client.xmlUnmarshal(b, v) +} + +// Unmarshal unmarshalls response body into the specified object according +// to response `Content-Type`. +func (r *Response) Unmarshal(v interface{}) error { + if r.Err != nil { + return r.Err + } + v = util.GetPointer(v) + contentType := r.Header.Get("Content-Type") + if strings.Contains(contentType, "json") { + return r.UnmarshalJson(v) + } else if strings.Contains(contentType, "xml") { + return r.UnmarshalXml(v) + } + return r.UnmarshalJson(v) +} + +// Into unmarshalls response body into the specified object according +// to response `Content-Type`. 
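When automatic result binding is not configured, Unmarshal and Into give manual control; a short sketch follows (the type and URL are illustrative).

package main

import (
    "fmt"
    "log"

    "github.com/imroc/req/v3"
)

func main() {
    resp, err := req.Get("https://api.example.com/users/imroc")
    if err != nil {
        log.Fatal(err)
    }
    // Into dispatches on the response Content-Type: JSON and XML bodies are
    // handled, anything else falls back to JSON, per Unmarshal above.
    var user struct {
        Name string `json:"name"`
    }
    if err := resp.Into(&user); err != nil {
        log.Fatal(err)
    }
    fmt.Println(user.Name)

    // ToString does not re-read the connection: the body was already read
    // and cached by ToBytes during Into.
    s, _ := resp.ToString()
    fmt.Println(len(s), "bytes of body")
}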
+func (r *Response) Into(v interface{}) error { + return r.Unmarshal(v) +} + +// Set response body with byte array content +func (r *Response) SetBody(body []byte) { + r.body = body +} + +// Set response body with string content +func (r *Response) SetBodyString(body string) { + r.body = []byte(body) +} + +// Bytes return the response body as []bytes that have already been read, could be +// nil if not read, the following cases are already read: +// 1. `Request.SetResult` or `Request.SetError` is called. +// 2. `Client.DisableAutoReadResponse` and `Request.DisableAutoReadResponse` is not +// called, and also `Request.SetOutput` and `Request.SetOutputFile` is not called. +func (r *Response) Bytes() []byte { + return r.body +} + +// String returns the response body as string that have already been read, could be +// nil if not read, the following cases are already read: +// 1. `Request.SetResult` or `Request.SetError` is called. +// 2. `Client.DisableAutoReadResponse` and `Request.DisableAutoReadResponse` is not +// called, and also `Request.SetOutput` and `Request.SetOutputFile` is not called. +func (r *Response) String() string { + return string(r.body) +} + +// ToString returns the response body as string, read body if not have been read. +func (r *Response) ToString() (string, error) { + b, err := r.ToBytes() + return string(b), err +} + +// ToBytes returns the response body as []byte, read body if not have been read. +func (r *Response) ToBytes() (body []byte, err error) { + if r.Err != nil { + return nil, r.Err + } + if r.body != nil { + return r.body, nil + } + if r.Response == nil || r.Response.Body == nil { + return []byte{}, nil + } + defer func() { + r.Body.Close() + if err != nil { + r.Err = err + } + r.body = body + }() + body, err = io.ReadAll(r.Body) + r.setReceivedAt() + if err == nil && r.Request.client.responseBodyTransformer != nil { + body, err = r.Request.client.responseBodyTransformer(body, r.Request, r) + } + return +} + +// Dump return the string content that have been dumped for the request. +// `Request.Dump` or `Request.DumpXXX` MUST have been called. +func (r *Response) Dump() string { + return r.Request.getDumpBuffer().String() +} + +// GetStatus returns the response status. +func (r *Response) GetStatus() string { + if r.Response == nil { + return "" + } + return r.Status +} + +// GetStatusCode returns the response status code. +func (r *Response) GetStatusCode() int { + if r.Response == nil { + return 0 + } + return r.StatusCode +} + +// GetHeader returns the response header value by key. +func (r *Response) GetHeader(key string) string { + if r.Response == nil { + return "" + } + return r.Header.Get(key) +} + +// GetHeaderValues returns the response header values by key. +func (r *Response) GetHeaderValues(key string) []string { + if r.Response == nil { + return nil + } + return r.Header.Values(key) +} + +// HeaderToString get all header as string. +func (r *Response) HeaderToString() string { + if r.Response == nil { + return "" + } + return convertHeaderToString(r.Header) +} diff --git a/retry.go b/retry.go new file mode 100644 index 00000000..fa67c843 --- /dev/null +++ b/retry.go @@ -0,0 +1,59 @@ +package req + +import ( + "math" + "math/rand" + "time" +) + +func defaultGetRetryInterval(resp *Response, attempt int) time.Duration { + return 100 * time.Millisecond +} + +// RetryConditionFunc is a retry condition, which determines +// whether the request should retry. 
+type RetryConditionFunc func(resp *Response, err error) bool + +// RetryHookFunc is a retry hook which will be executed before a retry. +type RetryHookFunc func(resp *Response, err error) + +// GetRetryIntervalFunc is a function that determines how long should +// sleep between retry attempts. +type GetRetryIntervalFunc func(resp *Response, attempt int) time.Duration + +func backoffInterval(min, max time.Duration) GetRetryIntervalFunc { + base := float64(min) + capLevel := float64(max) + return func(resp *Response, attempt int) time.Duration { + temp := math.Min(capLevel, base*math.Exp2(float64(attempt))) + halfTemp := int64(temp / 2) + sleep := halfTemp + rand.Int63n(halfTemp) + return time.Duration(sleep) + } +} + +func newDefaultRetryOption() *retryOption { + return &retryOption{ + GetRetryInterval: defaultGetRetryInterval, + } +} + +type retryOption struct { + MaxRetries int + GetRetryInterval GetRetryIntervalFunc + RetryConditions []RetryConditionFunc + RetryHooks []RetryHookFunc +} + +func (ro *retryOption) Clone() *retryOption { + if ro == nil { + return nil + } + o := &retryOption{ + MaxRetries: ro.MaxRetries, + GetRetryInterval: ro.GetRetryInterval, + } + o.RetryConditions = append(o.RetryConditions, ro.RetryConditions...) + o.RetryHooks = append(o.RetryHooks, ro.RetryHooks...) + return o +} diff --git a/retry_test.go b/retry_test.go new file mode 100644 index 00000000..5814b5fe --- /dev/null +++ b/retry_test.go @@ -0,0 +1,198 @@ +package req + +import ( + "bytes" + "io" + "math" + "net/http" + "testing" + "time" + + "github.com/imroc/req/v3/internal/tests" +) + +func TestRetryBackOff(t *testing.T) { + testRetry(t, func(r *Request) { + r.SetRetryBackoffInterval(10*time.Millisecond, 1*time.Second) + }) +} + +func testRetry(t *testing.T, setFunc func(r *Request)) { + attempt := 0 + r := tc().R(). + SetRetryCount(3). + SetRetryCondition(func(resp *Response, err error) bool { + return (err != nil) || (resp.StatusCode == http.StatusTooManyRequests) + }). + SetRetryHook(func(resp *Response, err error) { + attempt++ + }) + setFunc(r) + resp, err := r.Get("/too-many") + tests.AssertNoError(t, err) + tests.AssertEqual(t, 3, resp.Request.RetryAttempt) + tests.AssertEqual(t, 3, attempt) +} + +func TestRetryInterval(t *testing.T) { + testRetry(t, func(r *Request) { + r.SetRetryInterval(func(resp *Response, attempt int) time.Duration { + sleep := 0.01 * math.Exp2(float64(attempt)) + return time.Duration(math.Min(2, sleep)) * time.Second + }) + }) +} + +func TestRetryFixedInterval(t *testing.T) { + testRetry(t, func(r *Request) { + r.SetRetryFixedInterval(1 * time.Millisecond) + }) +} + +func TestAddRetryHook(t *testing.T) { + test := "test1" + testRetry(t, func(r *Request) { + r.AddRetryHook(func(resp *Response, err error) { + test = "test2" + }) + }) + tests.AssertEqual(t, "test2", test) +} + +func TestRetryOverride(t *testing.T) { + c := tc(). + SetCommonRetryCount(3). + SetCommonRetryHook(func(resp *Response, err error) {}). + AddCommonRetryHook(func(resp *Response, err error) {}). + SetCommonRetryCondition(func(resp *Response, err error) bool { + return false + }).SetCommonRetryBackoffInterval(1*time.Millisecond, 10*time.Millisecond) + test := "test" + resp, err := c.R().SetRetryFixedInterval(2 * time.Millisecond). + SetRetryCount(2). 
+ SetRetryHook(func(resp *Response, err error) { + test = "test1" + }).SetRetryCondition(func(resp *Response, err error) bool { + return err != nil || resp.StatusCode == http.StatusTooManyRequests + }).Get("/too-many") + tests.AssertNoError(t, err) + tests.AssertEqual(t, "test1", test) + tests.AssertEqual(t, 2, resp.Request.RetryAttempt) +} + +func TestAddRetryCondition(t *testing.T) { + attempt := 0 + resp, err := tc().R(). + SetRetryCount(3). + AddRetryCondition(func(resp *Response, err error) bool { + return err != nil + }). + AddRetryCondition(func(resp *Response, err error) bool { + return resp.StatusCode == http.StatusServiceUnavailable + }). + SetRetryHook(func(resp *Response, err error) { + attempt++ + }).Get("/too-many") + tests.AssertNoError(t, err) + tests.AssertEqual(t, 0, attempt) + tests.AssertEqual(t, 0, resp.Request.RetryAttempt) + + attempt = 0 + resp, err = tc(). + SetCommonRetryCount(3). + AddCommonRetryCondition(func(resp *Response, err error) bool { + return err != nil + }). + AddCommonRetryCondition(func(resp *Response, err error) bool { + return resp.StatusCode == http.StatusServiceUnavailable + }). + SetCommonRetryHook(func(resp *Response, err error) { + attempt++ + }).R().Get("/too-many") + tests.AssertNoError(t, err) + tests.AssertEqual(t, 0, attempt) + tests.AssertEqual(t, 0, resp.Request.RetryAttempt) + +} + +func TestRetryWithUnreplayableBody(t *testing.T) { + _, err := tc().R(). + SetRetryCount(1). + SetBody(bytes.NewBufferString("test")). + Post("/") + tests.AssertEqual(t, errRetryableWithUnReplayableBody, err) + + _, err = tc().R(). + SetRetryCount(1). + SetBody(io.NopCloser(bytes.NewBufferString("test"))). + Post("/") + tests.AssertEqual(t, errRetryableWithUnReplayableBody, err) +} + +func TestRetryWithSetResult(t *testing.T) { + headers := make(http.Header) + resp, err := tc().SetCommonCookies(&http.Cookie{ + Name: "test", + Value: "test", + }).R(). + SetRetryCount(1). + SetResult(&headers). + Get("/header") + assertSuccess(t, resp, err) + tests.AssertEqual(t, "test=test", headers.Get("Cookie")) +} + +func TestRetryWithModify(t *testing.T) { + tokens := []string{"badtoken1", "badtoken2", "goodtoken"} + tokenIndex := 0 + c := tc(). + SetCommonRetryCount(2). + SetCommonRetryHook(func(resp *Response, err error) { + tokenIndex++ + resp.Request.SetBearerAuthToken(tokens[tokenIndex]) + }).SetCommonRetryCondition(func(resp *Response, err error) bool { + return err != nil || resp.StatusCode == http.StatusUnauthorized + }) + + resp, err := c.R(). + SetBearerAuthToken(tokens[tokenIndex]). + Get("/protected") + assertSuccess(t, resp, err) + tests.AssertEqual(t, 2, resp.Request.RetryAttempt) +} + +func TestRetryFalse(t *testing.T) { + resp, err := tc().SetTimeout(2 * time.Second).R(). + SetRetryCount(1). + SetRetryCondition(func(resp *Response, err error) bool { + return false + }).Get("https://non-exists-host.com.cn") + tests.AssertNotNil(t, err) + tests.AssertIsNil(t, resp.Response) + tests.AssertEqual(t, 0, resp.Request.RetryAttempt) +} + +func TestRetryTurnedOffWhenRetryCountEqZero(t *testing.T) { + resp, err := tc().SetTimeout(2 * time.Second).R(). + SetRetryCount(0). + SetRetryCondition(func(resp *Response, err error) bool { + t.Fatal("retry condition should not be executed") + return true + }). + Get("https://non-exists-host.com.cn") + tests.AssertNotNil(t, err) + tests.AssertIsNil(t, resp.Response) + tests.AssertEqual(t, 0, resp.Request.RetryAttempt) + + resp, err = tc().SetTimeout(2 * time.Second). + SetCommonRetryCount(0). 
+ SetCommonRetryCondition(func(resp *Response, err error) bool { + t.Fatal("retry condition should not be executed") + return true + }). + R(). + Get("https://non-exists-host.com.cn") + tests.AssertNotNil(t, err) + tests.AssertIsNil(t, resp.Response) + tests.AssertEqual(t, 0, resp.Request.RetryAttempt) +} diff --git a/roundtrip.go b/roundtrip.go new file mode 100644 index 00000000..69953742 --- /dev/null +++ b/roundtrip.go @@ -0,0 +1,36 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js + +package req + +import ( + "net/http" +) + +// RoundTrip implements the RoundTripper interface. +// +// For higher-level HTTP client support (such as handling of cookies +// and redirects), see Get, Post, and the Client type. +// +// Like the RoundTripper interface, the error types returned +// by RoundTrip are unspecified. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + if t.wrappedRoundTrip != nil { + resp, err = t.wrappedRoundTrip.RoundTrip(req) + } else { + resp, err = t.roundTrip(req) + } + if err != nil { + return + } + if resp.ProtoMajor != 3 && t.altSvcJar != nil { + if v := resp.Header.Get("alt-svc"); v != "" { + t.handleAltSvc(req, v) + } + } + t.handleResponseBody(resp, req) + return +} diff --git a/roundtrip_js.go b/roundtrip_js.go new file mode 100644 index 00000000..9c6b6c4a --- /dev/null +++ b/roundtrip_js.go @@ -0,0 +1,373 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js && wasm + +package req + +import ( + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall/js" + + "github.com/imroc/req/v3/internal/ascii" +) + +var uint8Array = js.Global().Get("Uint8Array") + +// jsFetchMode is a Request.Header map key that, if present, +// signals that the map entry is actually an option to the Fetch API mode setting. +// Valid values are: "cors", "no-cors", "same-origin", "navigate" +// The default is "same-origin". +// +// Reference: https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch#Parameters +const jsFetchMode = "js.fetch:mode" + +// jsFetchCreds is a Request.Header map key that, if present, +// signals that the map entry is actually an option to the Fetch API credentials setting. +// Valid values are: "omit", "same-origin", "include" +// The default is "same-origin". +// +// Reference: https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch#Parameters +const jsFetchCreds = "js.fetch:credentials" + +// jsFetchRedirect is a Request.Header map key that, if present, +// signals that the map entry is actually an option to the Fetch API redirect setting. +// Valid values are: "follow", "error", "manual" +// The default is "follow". +// +// Reference: https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch#Parameters +const jsFetchRedirect = "js.fetch:redirect" + +// jsFetchMissing will be true if the Fetch API is not present in +// the browser globals. +var jsFetchMissing = js.Global().Get("fetch").IsUndefined() + +// jsFetchDisabled controls whether the use of Fetch API is disabled. +// It's set to true when we detect we're running in Node.js, so that +// RoundTrip ends up talking over the same fake network the HTTP servers +// currently use in various tests and examples. See go.dev/issue/57613. 
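On js/wasm builds, the three magic header keys above are read (and stripped) by the Fetch-based RoundTrip below and turned into Fetch API options. Here is a sketch of setting them on a plain *http.Request; the values come from the lists in the comments, and the URL is illustrative.

//go:build js && wasm

package example

import "net/http"

func newFetchRequest() (*http.Request, error) {
    r, err := http.NewRequest(http.MethodGet, "https://api.example.com/data", nil)
    if err != nil {
        return nil, err
    }
    // Interpreted by the wasm RoundTrip as Fetch API options, then removed
    // from the outgoing headers.
    r.Header.Set("js.fetch:mode", "cors")           // cors, no-cors, same-origin, navigate
    r.Header.Set("js.fetch:credentials", "include") // omit, same-origin, include
    r.Header.Set("js.fetch:redirect", "follow")     // follow, error, manual
    return r, nil
}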
+// +// TODO(go.dev/issue/60810): See if it's viable to test the Fetch API +// code path. +var jsFetchDisabled = js.Global().Get("process").Type() == js.TypeObject && + strings.HasPrefix(js.Global().Get("process").Get("argv0").String(), "node") + +// RoundTrip implements the [RoundTripper] interface using the WHATWG Fetch API. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + // The Transport has a documented contract that states that if the DialContext or + // DialTLSContext functions are set, they will be used to set up the connections. + // If they aren't set then the documented contract is to use Dial or DialTLS, even + // though they are deprecated. Therefore, if any of these are set, we should obey + // the contract and dial using the regular round-trip instead. Otherwise, we'll try + // to fall back on the Fetch API, unless it's not available. + if t.DialContext != nil || t.DialTLSContext != nil || jsFetchMissing || jsFetchDisabled { + return t.roundTrip(req) + } + + ac := js.Global().Get("AbortController") + if !ac.IsUndefined() { + // Some browsers that support WASM don't necessarily support + // the AbortController. See + // https://developer.mozilla.org/en-US/docs/Web/API/AbortController#Browser_compatibility. + ac = ac.New() + } + + opt := js.Global().Get("Object").New() + // See https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch + // for options available. + opt.Set("method", req.Method) + opt.Set("credentials", "same-origin") + if h := req.Header.Get(jsFetchCreds); h != "" { + opt.Set("credentials", h) + req.Header.Del(jsFetchCreds) + } + if h := req.Header.Get(jsFetchMode); h != "" { + opt.Set("mode", h) + req.Header.Del(jsFetchMode) + } + if h := req.Header.Get(jsFetchRedirect); h != "" { + opt.Set("redirect", h) + req.Header.Del(jsFetchRedirect) + } + if !ac.IsUndefined() { + opt.Set("signal", ac.Get("signal")) + } + headers := js.Global().Get("Headers").New() + for key, values := range req.Header { + for _, value := range values { + headers.Call("append", key, value) + } + } + opt.Set("headers", headers) + + if req.Body != nil { + // TODO(johanbrandhorst): Stream request body when possible. + // See https://bugs.chromium.org/p/chromium/issues/detail?id=688906 for Blink issue. + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1387483 for Firefox issue. + // See https://github.com/web-platform-tests/wpt/issues/7693 for WHATWG tests issue. + // See https://developer.mozilla.org/en-US/docs/Web/API/Streams_API for more details on the Streams API + // and browser support. + // NOTE(haruyama480): Ensure HTTP/1 fallback exists. + // See https://go.dev/issue/61889 for discussion. + body, err := io.ReadAll(req.Body) + if err != nil { + req.Body.Close() // RoundTrip must always close the body, including on errors. 
+ return nil, err + } + req.Body.Close() + if len(body) != 0 { + buf := uint8Array.New(len(body)) + js.CopyBytesToJS(buf, body) + opt.Set("body", buf) + } + } + + fetchPromise := js.Global().Call("fetch", req.URL.String(), opt) + var ( + respCh = make(chan *http.Response, 1) + errCh = make(chan error, 1) + success, failure js.Func + ) + success = js.FuncOf(func(this js.Value, args []js.Value) any { + success.Release() + failure.Release() + + result := args[0] + header := http.Header{} + // https://developer.mozilla.org/en-US/docs/Web/API/Headers/entries + headersIt := result.Get("headers").Call("entries") + for { + n := headersIt.Call("next") + if n.Get("done").Bool() { + break + } + pair := n.Get("value") + key, value := pair.Index(0).String(), pair.Index(1).String() + ck := http.CanonicalHeaderKey(key) + header[ck] = append(header[ck], value) + } + + contentLength := int64(0) + clHeader := header.Get("Content-Length") + switch { + case clHeader != "": + cl, err := strconv.ParseInt(clHeader, 10, 64) + if err != nil { + errCh <- fmt.Errorf("net/http: ill-formed Content-Length header: %v", err) + return nil + } + if cl < 0 { + // Content-Length values less than 0 are invalid. + // See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13 + errCh <- fmt.Errorf("net/http: invalid Content-Length header: %q", clHeader) + return nil + } + contentLength = cl + default: + // If the response length is not declared, set it to -1. + contentLength = -1 + } + + b := result.Get("body") + var body io.ReadCloser + // The body is undefined when the browser does not support streaming response bodies (Firefox), + // and null in certain error cases, i.e. when the request is blocked because of CORS settings. + if !b.IsUndefined() && !b.IsNull() { + body = &streamReader{stream: b.Call("getReader")} + } else { + // Fall back to using ArrayBuffer + // https://developer.mozilla.org/en-US/docs/Web/API/Body/arrayBuffer + body = &arrayReader{arrayPromise: result.Call("arrayBuffer")} + } + + code := result.Get("status").Int() + + uncompressed := false + if ascii.EqualFold(header.Get("Content-Encoding"), "gzip") { + // The fetch api will decode the gzip, but Content-Encoding not be deleted. + header.Del("Content-Encoding") + header.Del("Content-Length") + contentLength = -1 + uncompressed = true + } + respCh <- &http.Response{ + Status: fmt.Sprintf("%d %s", code, http.StatusText(code)), + StatusCode: code, + Header: header, + ContentLength: contentLength, + Uncompressed: uncompressed, + Body: body, + Request: req, + } + + return nil + }) + failure = js.FuncOf(func(this js.Value, args []js.Value) any { + success.Release() + failure.Release() + + err := args[0] + // The error is a JS Error type + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error + // We can use the toString() method to get a string representation of the error. + errMsg := err.Call("toString").String() + // Errors can optionally contain a cause. + if cause := err.Get("cause"); !cause.IsUndefined() { + // The exact type of the cause is not defined, + // but if it's another error, we can call toString() on it too. 
+ if !cause.Get("toString").IsUndefined() { + errMsg += ": " + cause.Call("toString").String() + } else if cause.Type() == js.TypeString { + errMsg += ": " + cause.String() + } + } + errCh <- fmt.Errorf("net/http: fetch() failed: %s", errMsg) + return nil + }) + + fetchPromise.Call("then", success, failure) + select { + case <-req.Context().Done(): + if !ac.IsUndefined() { + // Abort the Fetch request. + ac.Call("abort") + } + return nil, req.Context().Err() + case resp := <-respCh: + return resp, nil + case err := <-errCh: + return nil, err + } +} + +var errClosed = errors.New("net/http: reader is closed") + +// streamReader implements an io.ReadCloser wrapper for ReadableStream. +// See https://fetch.spec.whatwg.org/#readablestream for more information. +type streamReader struct { + pending []byte + stream js.Value + err error // sticky read error +} + +func (r *streamReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if len(r.pending) == 0 { + var ( + bCh = make(chan []byte, 1) + errCh = make(chan error, 1) + ) + success := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + result := args[0] + if result.Get("done").Bool() { + errCh <- io.EOF + return nil + } + value := make([]byte, result.Get("value").Get("byteLength").Int()) + js.CopyBytesToGo(value, result.Get("value")) + bCh <- value + return nil + }) + defer success.Release() + failure := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + // Assumes it's a TypeError. See + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypeError + // for more information on this type. See + // https://streams.spec.whatwg.org/#byob-reader-read for the spec on + // the read method. + errCh <- errors.New(args[0].Get("message").String()) + return nil + }) + defer failure.Release() + r.stream.Call("read").Call("then", success, failure) + select { + case b := <-bCh: + r.pending = b + case err := <-errCh: + r.err = err + return 0, err + } + } + n = copy(p, r.pending) + r.pending = r.pending[n:] + return n, nil +} + +func (r *streamReader) Close() error { + // This ignores any error returned from cancel method. So far, I did not encounter any concrete + // situation where reporting the error is meaningful. Most users ignore error from resp.Body.Close(). + // If there's a need to report error here, it can be implemented and tested when that need comes up. + r.stream.Call("cancel") + if r.err == nil { + r.err = errClosed + } + return nil +} + +// arrayReader implements an io.ReadCloser wrapper for ArrayBuffer. +// https://developer.mozilla.org/en-US/docs/Web/API/Body/arrayBuffer. +type arrayReader struct { + arrayPromise js.Value + pending []byte + read bool + err error // sticky read error +} + +func (r *arrayReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if !r.read { + r.read = true + var ( + bCh = make(chan []byte, 1) + errCh = make(chan error, 1) + ) + success := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + // Wrap the input ArrayBuffer with a Uint8Array + uint8arrayWrapper := uint8Array.New(args[0]) + value := make([]byte, uint8arrayWrapper.Get("byteLength").Int()) + js.CopyBytesToGo(value, uint8arrayWrapper) + bCh <- value + return nil + }) + defer success.Release() + failure := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + // Assumes it's a TypeError. 
See + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypeError + // for more information on this type. + // See https://fetch.spec.whatwg.org/#concept-body-consume-body for reasons this might error. + errCh <- errors.New(args[0].Get("message").String()) + return nil + }) + defer failure.Release() + r.arrayPromise.Call("then", success, failure) + select { + case b := <-bCh: + r.pending = b + case err := <-errCh: + return 0, err + } + } + if len(r.pending) == 0 { + return 0, io.EOF + } + n = copy(p, r.pending) + r.pending = r.pending[n:] + return n, nil +} + +func (r *arrayReader) Close() error { + if r.err == nil { + r.err = errClosed + } + return nil +} diff --git a/server.go b/server.go new file mode 100644 index 00000000..8cd25f11 --- /dev/null +++ b/server.go @@ -0,0 +1,18 @@ +package req + +import "sync" + +const copyBufPoolSize = 32 * 1024 + +var copyBufPool = sync.Pool{New: func() any { return new([copyBufPoolSize]byte) }} + +func getCopyBuf() []byte { + return copyBufPool.Get().(*[copyBufPoolSize]byte)[:] +} + +func putCopyBuf(b []byte) { + if len(b) != copyBufPoolSize { + panic("trying to put back buffer of the wrong size in the copyBufPool") + } + copyBufPool.Put((*[copyBufPoolSize]byte)(b)) +} diff --git a/setting.go b/setting.go deleted file mode 100644 index 74235f37..00000000 --- a/setting.go +++ /dev/null @@ -1,236 +0,0 @@ -package req - -import ( - "crypto/tls" - "errors" - "net" - "net/http" - "net/http/cookiejar" - "net/url" - "time" -) - -// create a default client -func newClient() *http.Client { - jar, _ := cookiejar.New(nil) - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - return &http.Client{ - Jar: jar, - Transport: transport, - Timeout: 2 * time.Minute, - } -} - -// Client return the default underlying http client -func (r *Req) Client() *http.Client { - if r.client == nil { - r.client = newClient() - } - return r.client -} - -// Client return the default underlying http client -func Client() *http.Client { - return std.Client() -} - -// SetClient sets the underlying http.Client. -func (r *Req) SetClient(client *http.Client) { - r.client = client // use default if client == nil -} - -// SetClient sets the default http.Client for requests. 
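server.go above only defines the buffer pool; as a hypothetical illustration (the calling code is not part of this diff), getCopyBuf and putCopyBuf are meant to be used in the io.CopyBuffer pattern:

package req

import "io"

// copyWithPooledBuf is a hypothetical caller of getCopyBuf/putCopyBuf from
// server.go: it borrows one of the pooled 32 KB buffers for the duration of
// a single copy instead of letting io.Copy allocate a fresh one.
func copyWithPooledBuf(dst io.Writer, src io.Reader) (int64, error) {
    buf := getCopyBuf()
    defer putCopyBuf(buf)
    return io.CopyBuffer(dst, src, buf)
}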
-func SetClient(client *http.Client) { - std.SetClient(client) -} - -// SetFlags control display format of *Resp -func (r *Req) SetFlags(flags int) { - r.flag = flags -} - -// SetFlags control display format of *Resp -func SetFlags(flags int) { - std.SetFlags(flags) -} - -// Flags return output format for the *Resp -func (r *Req) Flags() int { - return r.flag -} - -// Flags return output format for the *Resp -func Flags() int { - return std.Flags() -} - -func (r *Req) getTransport() *http.Transport { - trans, _ := r.Client().Transport.(*http.Transport) - return trans -} - -// EnableInsecureTLS allows insecure https -func (r *Req) EnableInsecureTLS(enable bool) { - trans := r.getTransport() - if trans == nil { - return - } - if trans.TLSClientConfig == nil { - trans.TLSClientConfig = &tls.Config{} - } - trans.TLSClientConfig.InsecureSkipVerify = enable -} - -func EnableInsecureTLS(enable bool) { - std.EnableInsecureTLS(enable) -} - -// EnableCookieenable or disable cookie manager -func (r *Req) EnableCookie(enable bool) { - if enable { - jar, _ := cookiejar.New(nil) - r.Client().Jar = jar - } else { - r.Client().Jar = nil - } -} - -// EnableCookieenable or disable cookie manager -func EnableCookie(enable bool) { - std.EnableCookie(enable) -} - -// SetTimeout sets the timeout for every request -func (r *Req) SetTimeout(d time.Duration) { - r.Client().Timeout = d -} - -// SetTimeout sets the timeout for every request -func SetTimeout(d time.Duration) { - std.SetTimeout(d) -} - -// SetProxyUrl set the simple proxy with fixed proxy url -func (r *Req) SetProxyUrl(rawurl string) error { - trans := r.getTransport() - if trans == nil { - return errors.New("req: no transport") - } - u, err := url.Parse(rawurl) - if err != nil { - return err - } - trans.Proxy = http.ProxyURL(u) - return nil -} - -// SetProxyUrl set the simple proxy with fixed proxy url -func SetProxyUrl(rawurl string) error { - return std.SetProxyUrl(rawurl) -} - -// SetProxy sets the proxy for every request -func (r *Req) SetProxy(proxy func(*http.Request) (*url.URL, error)) error { - trans := r.getTransport() - if trans == nil { - return errors.New("req: no transport") - } - trans.Proxy = proxy - return nil -} - -// SetProxy sets the proxy for every request -func SetProxy(proxy func(*http.Request) (*url.URL, error)) error { - return std.SetProxy(proxy) -} - -type jsonEncOpts struct { - indentPrefix string - indentValue string - escapeHTML bool -} - -func (r *Req) getJSONEncOpts() *jsonEncOpts { - if r.jsonEncOpts == nil { - r.jsonEncOpts = &jsonEncOpts{escapeHTML: true} - } - return r.jsonEncOpts -} - -// SetJSONEscapeHTML specifies whether problematic HTML characters -// should be escaped inside JSON quoted strings. -// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e -// to avoid certain safety problems that can arise when embedding JSON in HTML. -// -// In non-HTML settings where the escaping interferes with the readability -// of the output, SetEscapeHTML(false) disables this behavior. -func (r *Req) SetJSONEscapeHTML(escape bool) { - opts := r.getJSONEncOpts() - opts.escapeHTML = escape -} - -// SetJSONEscapeHTML specifies whether problematic HTML characters -// should be escaped inside JSON quoted strings. -// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e -// to avoid certain safety problems that can arise when embedding JSON in HTML. 
-// -// In non-HTML settings where the escaping interferes with the readability -// of the output, SetEscapeHTML(false) disables this behavior. -func SetJSONEscapeHTML(escape bool) { - std.SetJSONEscapeHTML(escape) -} - -// SetJSONIndent instructs the encoder to format each subsequent encoded -// value as if indented by the package-level function Indent(dst, src, prefix, indent). -// Calling SetIndent("", "") disables indentation. -func (r *Req) SetJSONIndent(prefix, indent string) { - opts := r.getJSONEncOpts() - opts.indentPrefix = prefix - opts.indentValue = indent -} - -// SetJSONIndent instructs the encoder to format each subsequent encoded -// value as if indented by the package-level function Indent(dst, src, prefix, indent). -// Calling SetIndent("", "") disables indentation. -func SetJSONIndent(prefix, indent string) { - std.SetJSONIndent(prefix, indent) -} - -type xmlEncOpts struct { - prefix string - indent string -} - -func (r *Req) getXMLEncOpts() *xmlEncOpts { - if r.xmlEncOpts == nil { - r.xmlEncOpts = &xmlEncOpts{} - } - return r.xmlEncOpts -} - -// SetXMLIndent sets the encoder to generate XML in which each element -// begins on a new indented line that starts with prefix and is followed by -// one or more copies of indent according to the nesting depth. -func (r *Req) SetXMLIndent(prefix, indent string) { - opts := r.getXMLEncOpts() - opts.prefix = prefix - opts.indent = indent -} - -// SetXMLIndent sets the encoder to generate XML in which each element -// begins on a new indented line that starts with prefix and is followed by -// one or more copies of indent according to the nesting depth. -func SetXMLIndent(prefix, indent string) { - std.SetXMLIndent(prefix, indent) -} diff --git a/setting_test.go b/setting_test.go deleted file mode 100644 index e71a6d7d..00000000 --- a/setting_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package req - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func newDefaultTestServer() *httptest.Server { - handler := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hi")) - } - return httptest.NewServer(http.HandlerFunc(handler)) -} - -func TestSetClient(t *testing.T) { - - ts := newDefaultTestServer() - - client := &http.Client{} - SetClient(client) - _, err := Get(ts.URL) - if err != nil { - t.Errorf("error after set client: %v", err) - } - - SetClient(nil) - _, err = Get(ts.URL) - if err != nil { - t.Errorf("error after set client to nil: %v", err) - } - - client = Client() - if trans, ok := client.Transport.(*http.Transport); ok { - trans.MaxIdleConns = 1 - trans.DisableKeepAlives = true - _, err = Get(ts.URL) - if err != nil { - t.Errorf("error after change client's transport: %v", err) - } - } else { - t.Errorf("transport is not http.Transport: %+#v", client.Transport) - } -} - -func TestSetting(t *testing.T) { - defer func() { - if rc := recover(); rc != nil { - t.Errorf("panic happened while change setting: %v", rc) - } - }() - SetTimeout(2 * time.Second) - EnableCookie(false) - EnableCookie(true) - EnableInsecureTLS(true) - SetJSONIndent("", " ") - SetJSONEscapeHTML(false) - SetXMLIndent("", "\t") - SetProxyUrl("http://localhost:8080") - SetProxy(nil) -} diff --git a/textproto_reader.go b/textproto_reader.go new file mode 100644 index 00000000..1c09872a --- /dev/null +++ b/textproto_reader.go @@ -0,0 +1,587 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package req + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "math" + "net/textproto" + "sync" + + "github.com/imroc/req/v3/internal/dump" +) + +func isASCIILetter(b byte) bool { + b |= 0x20 // make lower case + return 'a' <= b && b <= 'z' +} + +// TODO: This should be a distinguishable error (ErrMessageTooLarge) +// to allow mime/multipart to detect it. +var errMessageTooLarge = errors.New("message too large") + +// A textprotoReader implements convenience methods for reading requests +// or responses from a text protocol network connection. +type textprotoReader struct { + R *bufio.Reader + buf []byte // a re-usable buffer for readContinuedLineSlice + readLine func() (line []byte, isPrefix bool, err error) +} + +// NewReader returns a new textprotoReader reading from r. +// +// To avoid denial of service attacks, the provided bufio.Reader +// should be reading from an io.LimitReader or similar textprotoReader to bound +// the size of responses. +func newTextprotoReader(r *bufio.Reader, ds dump.Dumpers) *textprotoReader { + commonHeaderOnce.Do(initCommonHeader) + t := &textprotoReader{R: r} + + if ds.ShouldDump() { + t.readLine = func() (line []byte, isPrefix bool, err error) { + line, err = t.R.ReadSlice('\n') + if len(line) == 0 { + if err != nil { + line = nil + } + return + } + err = nil + ds.DumpResponseHeader(line) + if line[len(line)-1] == '\n' { + drop := 1 + if len(line) > 1 && line[len(line)-2] == '\r' { + drop = 2 + } + line = line[:len(line)-drop] + } + return + } + } else { + t.readLine = t.R.ReadLine + } + + return t +} + +// ReadLine reads a single line from r, +// eliding the final \n or \r\n from the returned string. +func (r *textprotoReader) ReadLine() (string, error) { + line, err := r.readLineSlice(-1) + return string(line), err +} + +// readLineSlice reads a single line from r, +// up to lim bytes long (or unlimited if lim is less than 0), +// eliding the final \r or \r\n from the returned string. +func (r *textprotoReader) readLineSlice(lim int64) ([]byte, error) { + var line []byte + + for { + l, more, err := r.readLine() + if err != nil { + return nil, err + } + if lim >= 0 && int64(len(line))+int64(len(l)) > lim { + return nil, errMessageTooLarge + } + // Avoid the copy if the first call produced a full line. + if line == nil && !more { + return l, nil + } + line = append(line, l...) + if !more { + break + } + } + return line, nil +} + +// trim returns s with leading and trailing spaces and tabs removed. +// It does not assume Unicode or UTF-8. +func trim(s []byte) []byte { + i := 0 + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + n := len(s) + for n > i && (s[n-1] == ' ' || s[n-1] == '\t') { + n-- + } + return s[i:n] +} + +// readContinuedLineSlice reads continued lines from the reader buffer, +// returning a byte slice with all lines. The validateFirstLine function +// is run on the first read line, and if it returns an error then this +// error is returned from readContinuedLineSlice. +// It reads up to lim bytes of data (or unlimited if lim is less than 0). +func (r *textprotoReader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) { + if validateFirstLine == nil { + return nil, fmt.Errorf("missing validateFirstLine func") + } + + // Read the first line. 
+ line, err := r.readLineSlice(lim) + if err != nil { + return nil, err + } + if len(line) == 0 { // blank line - no continuation + return line, nil + } + + if err := validateFirstLine(line); err != nil { + return nil, err + } + + // Optimistically assume that we have started to buffer the next line + // and it starts with an ASCII letter (the next header key), or a blank + // line, so we can avoid copying that buffered data around in memory + // and skipping over non-existent whitespace. + if r.R.Buffered() > 1 { + peek, _ := r.R.Peek(2) + if len(peek) > 0 && (isASCIILetter(peek[0]) || peek[0] == '\n') || + len(peek) == 2 && peek[0] == '\r' && peek[1] == '\n' { + return trim(line), nil + } + } + + // ReadByte or the next readLineSlice will flush the read buffer; + // copy the slice into buf. + r.buf = append(r.buf[:0], trim(line)...) + + if lim < 0 { + lim = math.MaxInt64 + } + lim -= int64(len(r.buf)) + + // Read continuation lines. + for r.skipSpace() > 0 { + r.buf = append(r.buf, ' ') + if int64(len(r.buf)) >= lim { + return nil, errMessageTooLarge + } + line, err := r.readLineSlice(lim - int64(len(r.buf))) + if err != nil { + break + } + r.buf = append(r.buf, trim(line)...) + } + return r.buf, nil +} + +// skipSpace skips R over all spaces and returns the number of bytes skipped. +func (r *textprotoReader) skipSpace() int { + n := 0 + for { + c, err := r.R.ReadByte() + if err != nil { + // Bufio will keep err until next read. + break + } + if c != ' ' && c != '\t' { + r.R.UnreadByte() + break + } + n++ + } + return n +} + +// A protocolError describes a protocol violation such +// as an invalid response or a hung-up connection. +type protocolError string + +func (p protocolError) Error() string { + return string(p) +} + +var colon = []byte(":") + +// ReadMIMEHeader reads a MIME-style header from r. +// The header is a sequence of possibly continued Key: Value lines +// ending in a blank line. +// The returned map m maps [CanonicalMIMEHeaderKey](key) to a +// sequence of values in the same order encountered in the input. +// +// For example, consider this input: +// +// My-Key: Value 1 +// Long-Key: Even +// Longer Value +// My-Key: Value 2 +// +// Given that input, ReadMIMEHeader returns the map: +// +// map[string][]string{ +// "My-Key": {"Value 1", "Value 2"}, +// "Long-Key": {"Even Longer Value"}, +// } +func (r *textprotoReader) ReadMIMEHeader() (textproto.MIMEHeader, error) { + return r.readMIMEHeader(math.MaxInt64, math.MaxInt64) +} + +// readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size. +// It is called by the mime/multipart package. +func (r *textprotoReader) readMIMEHeader(maxMemory, maxHeaders int64) (textproto.MIMEHeader, error) { + // Avoid lots of small slice allocations later by allocating one + // large one ahead of time which we'll cut up into smaller + // slices. If this isn't big enough later, we allocate small ones. + var strs []string + hint := r.upcomingHeaderKeys() + if hint > 0 { + if hint > 1000 { + hint = 1000 // set a cap to avoid overallocation + } + strs = make([]string, hint) + } + + m := make(textproto.MIMEHeader, hint) + + // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry. + // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large + // MIMEHeaders average about 200 bytes per entry. + maxMemory -= 400 + const mapEntryOverhead = 200 + + // The first line cannot start with a leading space. 
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') { + const errorLimit = 80 // arbitrary limit on how much of the line we'll quote + line, err := r.readLineSlice(errorLimit) + if err != nil { + return m, err + } + return m, protocolError("malformed MIME header initial line: " + string(line)) + } + + for { + kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon) + if len(kv) == 0 { + return m, err + } + + // Key ends at first colon. + k, v, ok := bytes.Cut(kv, colon) + if !ok { + return m, protocolError("malformed MIME header line: " + string(kv)) + } + key, ok := canonicalMIMEHeaderKey(k) + if !ok { + return m, protocolError("malformed MIME header line: " + string(kv)) + } + for _, c := range v { + if !validHeaderValueByte(c) { + return m, protocolError("malformed MIME header line: " + string(kv)) + } + } + + maxHeaders-- + if maxHeaders < 0 { + return nil, errMessageTooLarge + } + + // Skip initial spaces in value. + value := string(bytes.TrimLeft(v, " \t")) + + vv := m[key] + if vv == nil { + maxMemory -= int64(len(key)) + maxMemory -= mapEntryOverhead + } + maxMemory -= int64(len(value)) + if maxMemory < 0 { + return m, errMessageTooLarge + } + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. + vv, strs = strs[:1:1], strs[1:] + vv[0] = value + m[key] = vv + } else { + m[key] = append(vv, value) + } + + if err != nil { + return m, err + } + } +} + +// mustHaveFieldNameColon ensures that, per RFC 7230, the +// field-name is on a single line, so the first line must +// contain a colon. +func mustHaveFieldNameColon(line []byte) error { + if bytes.IndexByte(line, ':') < 0 { + return protocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line)) + } + return nil +} + +var nl = []byte("\n") + +// upcomingHeaderKeys returns an approximation of the number of keys +// that will be in this header. If it gets confused, it returns 0. +func (r *textprotoReader) upcomingHeaderKeys() (n int) { + // Try to determine the 'hint' size. + r.R.Peek(1) // force a buffer load if empty + s := r.R.Buffered() + if s == 0 { + return + } + peek, _ := r.R.Peek(s) + for len(peek) > 0 && n < 1000 { + var line []byte + line, peek, _ = bytes.Cut(peek, nl) + if len(line) == 0 || (len(line) == 1 && line[0] == '\r') { + // Blank line separating headers from the body. + break + } + if line[0] == ' ' || line[0] == '\t' { + // Folded continuation of the previous line. + continue + } + n++ + } + return n +} + +const toLower = 'a' - 'A' + +// validHeaderFieldByte reports whether b is a valid byte in a header +// field name. RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// token = 1*tchar +func validHeaderFieldByte(b byte) bool { + return int(b) < len(isTokenTable) && isTokenTable[b] +} + +// canonicalMIMEHeaderKey is like CanonicalMIMEHeaderKey but is +// allowed to mutate the provided byte slice before returning the +// string. +// +// For invalid inputs (if a contains spaces or non-token bytes), a +// is unchanged and a string copy is returned. +// +// ok is true if the header key contains only valid characters and spaces. 
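+//
+// Illustrative examples of the behavior described above:
+// canonicalMIMEHeaderKey([]byte("content-type")) returns ("Content-Type", true);
+// a key containing a space, such as "Bad Key", is returned unchanged with ok == true;
+// a key containing another invalid byte, such as "Key\n", is returned unchanged with ok == false.
+//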
+// ReadMIMEHeader accepts header keys containing spaces, but does not +// canonicalize them. +func canonicalMIMEHeaderKey(a []byte) (_ string, ok bool) { + if len(a) == 0 { + return "", false + } + + // See if a looks like a header key. If not, return it unchanged. + noCanon := false + for _, c := range a { + if validHeaderFieldByte(c) { + continue + } + // Don't canonicalize. + if c == ' ' { + // We accept invalid headers with a space before the + // colon, but must not canonicalize them. + // See https://go.dev/issue/34540. + noCanon = true + continue + } + return string(a), false + } + if noCanon { + return string(a), true + } + + upper := true + for i, c := range a { + // Canonicalize: first letter upper case + // and upper case after each dash. + // (Host, User-Agent, If-Modified-Since). + // MIME headers are ASCII only, so no Unicode issues. + if upper && 'a' <= c && c <= 'z' { + c -= toLower + } else if !upper && 'A' <= c && c <= 'Z' { + c += toLower + } + a[i] = c + upper = c == '-' // for next time + } + commonHeaderOnce.Do(initCommonHeader) + // The compiler recognizes m[string(byteSlice)] as a special + // case, so a copy of a's bytes into a new string does not + // happen in this map lookup: + if v := commonHeader[string(a)]; v != "" { + return v, true + } + return string(a), true +} + +// validHeaderValueByte reports whether c is a valid byte in a header +// field value. RFC 7230 says: +// +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// +// RFC 5234 says: +// +// HTAB = %x09 +// SP = %x20 +// VCHAR = %x21-7E +func validHeaderValueByte(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c >= 128, then 1<>64)) == 0 +} + +// commonHeader interns common header strings. +var commonHeader map[string]string + +var commonHeaderOnce sync.Once + +func initCommonHeader() { + commonHeader = make(map[string]string) + for _, v := range []string{ + "Accept", + "Accept-Charset", + "Accept-Encoding", + "Accept-Language", + "Accept-Ranges", + "Cache-Control", + "Cc", + "Connection", + "Content-Id", + "Content-Language", + "Content-Length", + "Content-Transfer-Encoding", + "Content-Type", + "Cookie", + "Date", + "Dkim-Signature", + "Etag", + "Expires", + "From", + "Host", + "If-Modified-Since", + "If-None-Match", + "In-Reply-To", + "Last-Modified", + "Location", + "Message-Id", + "Mime-Version", + "Pragma", + "Received", + "Return-Path", + "Server", + "Set-Cookie", + "Subject", + "To", + "User-Agent", + "Via", + "X-Forwarded-For", + "X-Imforwards", + "X-Powered-By", + } { + commonHeader[v] = v + } +} + +// isTokenTable is a copy of net/http/lex.go's isTokenTable. 
+// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} diff --git a/trace.go b/trace.go new file mode 100644 index 00000000..c87127e9 --- /dev/null +++ b/trace.go @@ -0,0 +1,164 @@ +package req + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http/httptrace" + "time" +) + +const ( + traceFmt = `TotalTime : %v +DNSLookupTime : %v +TCPConnectTime : %v +TLSHandshakeTime : %v +FirstResponseTime : %v +ResponseTime : %v +IsConnReused: : false +RemoteAddr : %v +LocalAddr : %v` + traceReusedFmt = `TotalTime : %v +FirstResponseTime : %v +ResponseTime : %v +IsConnReused: : true +RemoteAddr : %v +LocalAddr : %v` +) + +// Blame return the human-readable reason of why request is slowing. +func (t TraceInfo) Blame() string { + if t.RemoteAddr == nil { + return "trace is not enabled" + } + var mk string + var mv time.Duration + m := map[string]time.Duration{ + "on dns lookup": t.DNSLookupTime, + "on tcp connect": t.TCPConnectTime, + "on tls handshake": t.TLSHandshakeTime, + "from connection ready to server respond first byte": t.FirstResponseTime, + "from server respond first byte to request completion": t.ResponseTime, + } + for k, v := range m { + if v > mv { + mk = k + mv = v + } + } + if mk == "" { + return "nothing to blame" + } + return fmt.Sprintf("the request total time is %v, and costs %v %s", t.TotalTime, mv, mk) +} + +// String return the details of trace information. +func (t TraceInfo) String() string { + if t.RemoteAddr == nil { + return "trace is not enabled" + } + if t.IsConnReused { + return fmt.Sprintf(traceReusedFmt, t.TotalTime, t.FirstResponseTime, t.ResponseTime, t.RemoteAddr, t.LocalAddr) + } + return fmt.Sprintf(traceFmt, t.TotalTime, t.DNSLookupTime, t.TCPConnectTime, t.TLSHandshakeTime, t.FirstResponseTime, t.ResponseTime, t.RemoteAddr, t.LocalAddr) +} + +// TraceInfo represents the trace information. +type TraceInfo struct { + // DNSLookupTime is a duration that transport took to perform + // DNS lookup. + DNSLookupTime time.Duration + + // ConnectTime is a duration that took to obtain a successful connection. + ConnectTime time.Duration + + // TCPConnectTime is a duration that took to obtain the TCP connection. + TCPConnectTime time.Duration + + // TLSHandshakeTime is a duration that TLS handshake took place. + TLSHandshakeTime time.Duration + + // FirstResponseTime is a duration that server took to respond first byte since + // connection ready (after tls handshake if it's tls and not a reused connection). 
+ FirstResponseTime time.Duration + + // ResponseTime is a duration since first response byte from server to + // request completion. + ResponseTime time.Duration + + // TotalTime is a duration that total request took end-to-end. + TotalTime time.Duration + + // IsConnReused is whether this connection has been previously + // used for another HTTP request. + IsConnReused bool + + // IsConnWasIdle is whether this connection was obtained from an + // idle pool. + IsConnWasIdle bool + + // ConnIdleTime is a duration how long the connection was previously + // idle, if IsConnWasIdle is true. + ConnIdleTime time.Duration + + // RemoteAddr returns the remote network address. + RemoteAddr net.Addr + + // LocalAddr returns the local network address. + LocalAddr net.Addr +} + +type clientTrace struct { + getConn time.Time + dnsStart time.Time + dnsDone time.Time + connectDone time.Time + tlsHandshakeStart time.Time + tlsHandshakeDone time.Time + gotConn time.Time + gotFirstResponseByte time.Time + endTime time.Time + gotConnInfo httptrace.GotConnInfo +} + +func (t *clientTrace) createContext(ctx context.Context) context.Context { + return httptrace.WithClientTrace( + ctx, + &httptrace.ClientTrace{ + DNSStart: func(_ httptrace.DNSStartInfo) { + t.dnsStart = time.Now() + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + t.dnsDone = time.Now() + }, + ConnectStart: func(_, _ string) { + if t.dnsDone.IsZero() { + t.dnsDone = time.Now() + } + if t.dnsStart.IsZero() { + t.dnsStart = t.dnsDone + } + }, + ConnectDone: func(net, addr string, err error) { + t.connectDone = time.Now() + }, + GetConn: func(_ string) { + t.getConn = time.Now() + }, + GotConn: func(ci httptrace.GotConnInfo) { + t.gotConn = time.Now() + t.gotConnInfo = ci + }, + GotFirstResponseByte: func() { + t.gotFirstResponseByte = time.Now() + }, + TLSHandshakeStart: func() { + t.tlsHandshakeStart = time.Now() + }, + TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { + t.tlsHandshakeDone = time.Now() + }, + }, + ) +} diff --git a/transfer.go b/transfer.go new file mode 100644 index 00000000..e9cd5a56 --- /dev/null +++ b/transfer.go @@ -0,0 +1,1073 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package req + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + "reflect" + "slices" + "strconv" + "strings" + "sync" + "time" + + "github.com/imroc/req/v3/internal" + "github.com/imroc/req/v3/internal/ascii" + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/godebug" + + "golang.org/x/net/http/httpguts" +) + +type errorReader struct { + err error +} + +func (r errorReader) Read(p []byte) (n int, err error) { + return 0, r.err +} + +type byteReader struct { + b byte + done bool +} + +func (br *byteReader) Read(p []byte) (n int, err error) { + if br.done { + return 0, io.EOF + } + if len(p) == 0 { + return 0, nil + } + br.done = true + p[0] = br.b + return 1, io.EOF +} + +// transferWriter inspects the fields of a user-supplied Request or Response, +// sanitizes them without changing the user object and provides methods for +// writing the respective header, body and trailer in wire format. 
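+// It mirrors the unexported transferWriter type in Go's net/http package.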
+type transferWriter struct { + Method string + Body io.Reader + BodyCloser io.Closer + ContentLength int64 // -1 means unknown, 0 means exactly none + Close bool + TransferEncoding []string + Header http.Header + Trailer http.Header + bodyReadError error // any non-EOF error from reading Body + + FlushHeaders bool // flush headers to network before body + ByteReadCh chan readResult // non-nil if probeRequestBody called +} + +func newTransferWriter(r *http.Request) (t *transferWriter, err error) { + t = &transferWriter{} + + // Extract relevant fields + atLeastHTTP11 := false + if r.ContentLength != 0 && r.Body == nil { + return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", r.ContentLength) + } + t.Method = valueOrDefault(r.Method, "GET") + t.Close = r.Close + t.TransferEncoding = r.TransferEncoding + t.Header = r.Header + t.Trailer = r.Trailer + t.Body = r.Body + t.BodyCloser = r.Body + t.ContentLength = outgoingLength(r) + if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() { + t.TransferEncoding = []string{"chunked"} + } + // If there's a body, conservatively flush the headers + // to any bufio.Writer we're writing to, just in case + // the server needs the headers early, before we copy + // the body and possibly block. We make an exception + // for the common standard library in-memory types, + // though, to avoid unnecessary TCP packets on the + // wire. (Issue 22088.) + if t.ContentLength != 0 && !isKnownInMemoryReader(t.Body) { + t.FlushHeaders = true + } + + atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0 + + // Sanitize Body,ContentLength,TransferEncoding + if !atLeastHTTP11 || t.Body == nil { + t.TransferEncoding = nil + } + if chunked(t.TransferEncoding) { + t.ContentLength = -1 + } else if t.Body == nil { // no chunking, no body + t.ContentLength = 0 + } + + // Sanitize Trailer + if !chunked(t.TransferEncoding) { + t.Trailer = nil + } + + return t, nil +} + +// shouldSendChunkedRequestBody reports whether we should try to send a +// chunked request body to the server. In particular, the case we really +// want to prevent is sending a GET or other typically-bodyless request to a +// server with a chunked body when the body has zero bytes, since GETs with +// bodies (while acceptable according to specs), even zero-byte chunked +// bodies, are approximately never seen in the wild and confuse most +// servers. See Issue 18257, as one example. +// +// The only reason we'd send such a request is if the user set the Body to a +// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't +// set ContentLength, or NewRequest set it to -1 (unknown), so then we assume +// there's bytes to send. +// +// This code tries to read a byte from the Request.Body in such cases to see +// whether the body actually has content (super rare) or is actually just +// a non-nil content-less ReadCloser (the more common case). In that more +// common case, we act as if their Body were nil instead, and don't send +// a body. +func (t *transferWriter) shouldSendChunkedRequestBody() bool { + // Note that t.ContentLength is the corrected content length + // from rr.outgoingLength, so 0 actually means zero, not unknown. 
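+ //
+ // Restated, the decision implemented below is:
+ //   known length (>= 0) or nil body  -> never chunk here,
+ //   CONNECT                          -> never chunk,
+ //   GET/HEAD/etc. (usually bodyless) -> probe one byte of the body first,
+ //   anything else (POST, PUT, ...)   -> send chunked.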
+ if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them + return false + } + if t.Method == "CONNECT" { + return false + } + if requestMethodUsuallyLacksBody(t.Method) { + // Only probe the Request.Body for GET/HEAD/DELETE/etc + // requests, because it's only those types of requests + // that confuse servers. + t.probeRequestBody() // adjusts t.Body, t.ContentLength + return t.Body != nil + } + // For all other request types (PUT, POST, PATCH, or anything + // made-up we've never heard of), assume it's normal and the server + // can deal with a chunked request body. Maybe we'll adjust this + // later. + return true +} + +// probeRequestBody reads a byte from t.Body to see whether it's empty +// (returns io.EOF right away). +// +// But because we've had problems with this blocking users in the past +// (issue 17480) when the body is a pipe (perhaps waiting on the response +// headers before the pipe is fed data), we need to be careful and bound how +// long we wait for it. This delay will only affect users if all the following +// are true: +// - the request body blocks +// - the content length is not set (or set to -1) +// - the method doesn't usually have a body (GET, HEAD, DELETE, ...) +// - there is no transfer-encoding=chunked already set. +// +// In other words, this delay will not normally affect anybody, and there +// are workarounds if it does. +func (t *transferWriter) probeRequestBody() { + t.ByteReadCh = make(chan readResult, 1) + go func(body io.Reader) { + var buf [1]byte + var rres readResult + rres.n, rres.err = body.Read(buf[:]) + if rres.n == 1 { + rres.b = buf[0] + } + t.ByteReadCh <- rres + close(t.ByteReadCh) + }(t.Body) + timer := time.NewTimer(200 * time.Millisecond) + select { + case rres := <-t.ByteReadCh: + timer.Stop() + if rres.n == 0 && rres.err == io.EOF { + // It was empty. + t.Body = nil + t.ContentLength = 0 + } else if rres.n == 1 { + if rres.err != nil { + t.Body = io.MultiReader(&byteReader{b: rres.b}, errorReader{rres.err}) + } else { + t.Body = io.MultiReader(&byteReader{b: rres.b}, t.Body) + } + } else if rres.err != nil { + t.Body = errorReader{rres.err} + } + case <-timer.C: + // Too slow. Don't wait. Read it later, and keep + // assuming that this is ContentLength == -1 + // (unknown), which means we'll send a + // "Transfer-Encoding: chunked" header. + t.Body = io.MultiReader(finishAsyncByteRead{t}, t.Body) + // Request that Request.Write flush the headers to the + // network before writing the body, since our body may not + // become readable until it's seen the response headers. 
+ t.FlushHeaders = true + } +} + +func noResponseBodyExpected(requestMethod string) bool { + return requestMethod == "HEAD" +} + +func (t *transferWriter) shouldSendContentLength() bool { + if chunked(t.TransferEncoding) { + return false + } + if t.ContentLength > 0 { + return true + } + if t.ContentLength < 0 { + return false + } + // Many servers expect a Content-Length for these methods + if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" { + return true + } + if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { + if t.Method == "GET" || t.Method == "HEAD" { + return false + } + return true + } + + return false +} + +func (t *transferWriter) writeHeader(writeHeader func(key string, values ...string) error) error { + if t.Close && !hasToken(headerGet(t.Header, "Connection"), "close") { + err := writeHeader("Connection", "close") + if err != nil { + return err + } + } + + // Write Content-Length and/or Transfer-Encoding whose values are a + // function of the sanitized field triple (Body, ContentLength, + // TransferEncoding) + if t.shouldSendContentLength() { + err := writeHeader("Content-Length", strconv.FormatInt(t.ContentLength, 10)) + if err != nil { + return err + } + } else if chunked(t.TransferEncoding) { + err := writeHeader("Transfer-Encoding", "chunked") + if err != nil { + return err + } + } + + // Write Trailer header + if t.Trailer != nil { + keys := make([]string, 0, len(t.Trailer)) + for k := range t.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return badStringError("invalid Trailer key", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + slices.Sort(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. + err := writeHeader("Trailer", strings.Join(keys, ",")) + if err != nil { + return err + } + } + } + + return nil +} + +// always closes t.BodyCloser +func (t *transferWriter) writeBody(w io.Writer, dumps []*dump.Dumper) (err error) { + var ncopy int64 + closed := false + defer func() { + if closed || t.BodyCloser == nil { + return + } + if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil { + err = closeErr + } + }() + + rw := w // raw writer + for _, dump := range dumps { + if dump.RequestBody() { + w = dump.WrapRequestBodyWriter(w) + } + } + + // Write body. We "unwrap" the body first if it was wrapped in a + // nopCloser or readTrackingBody. This is to ensure that we can take advantage of + // OS-level optimizations in the event that the body is an + // *os.File. 
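+ // For example, a body created as io.NopCloser(f) with f an *os.File is
+ // unwrapped back to f, so the copy below can hand the file to writers that
+ // implement io.ReaderFrom (sendfile-style copies) instead of going through
+ // a user-space buffer.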
+ if t.Body != nil { + body := t.unwrapBody() + if chunked(t.TransferEncoding) { + if bw, ok := rw.(*bufio.Writer); ok { + rw = &internal.FlushAfterChunkWriter{Writer: bw} + } + cw := internal.NewChunkedWriter(rw) + for _, dump := range dumps { + if dump.RequestBody() { + cw = dump.WrapRequestBodyWriteCloser(cw) + } + } + _, err = t.doBodyCopy(cw, body) + if err == nil { + err = cw.Close() + } + } else if t.ContentLength == -1 { + dst := w + if t.Method == "CONNECT" { + dst = bufioFlushWriter{dst} + } + ncopy, err = t.doBodyCopy(dst, body) + } else { + ncopy, err = t.doBodyCopy(w, io.LimitReader(body, t.ContentLength)) + if err != nil { + return err + } + var nextra int64 + nextra, err = t.doBodyCopy(io.Discard, body) + ncopy += nextra + } + if err != nil { + return err + } + for _, dump := range dumps { + if dump.RequestBody() { + dump.DumpDefault([]byte("\r\n")) + } + } + } + if t.BodyCloser != nil { + closed = true + if err := t.BodyCloser.Close(); err != nil { + return err + } + } + + if t.ContentLength != -1 && t.ContentLength != ncopy { + return fmt.Errorf("http: ContentLength=%d with Body length %d", + t.ContentLength, ncopy) + } + + if chunked(t.TransferEncoding) { + // Write Trailer header + if t.Trailer != nil { + if err := t.Trailer.Write(w); err != nil { + return err + } + } + // Last chunk, empty trailer + _, err = io.WriteString(w, "\r\n") + } + return err +} + +// doBodyCopy wraps a copy operation, with any resulting error also +// being saved in bodyReadError. +// +// This function is only intended for use in writeBody. +func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) { + buf := getCopyBuf() + defer putCopyBuf(buf) + n, err = io.CopyBuffer(dst, src, buf) + if err != nil && err != io.EOF { + t.bodyReadError = err + } + return +} + +// unwrapBody unwraps the body's inner reader if it's a +// nopCloser. This is to ensure that body writes sourced from local +// files (*os.File types) are properly optimized. +// +// This function is only intended for use in writeBody. +func (t *transferWriter) unwrapBody() io.Reader { + if r, ok := unwrapNopCloser(t.Body); ok { + return r + } + if r, ok := t.Body.(*readTrackingBody); ok { + r.didRead = true + return r.ReadCloser + } + return t.Body +} + +type transferReader struct { + // Input + Header http.Header + StatusCode int + RequestMethod string + ProtoMajor int + ProtoMinor int + // Output + Body io.ReadCloser + ContentLength int64 + Chunked bool + Close bool + Trailer http.Header +} + +func (t *transferReader) protoAtLeast(m, n int) bool { + return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +// msg is *http.Request or *http.Response. 
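+// (In this copy only the *http.Response case is wired up; any other type,
+// including *http.Request, makes readTransfer panic.)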
+func readTransfer(msg interface{}, r *bufio.Reader) (err error) { + t := &transferReader{RequestMethod: "GET"} + + // Unify input + isResponse := false + switch rr := msg.(type) { + case *http.Response: + t.Header = rr.Header + t.StatusCode = rr.StatusCode + t.ProtoMajor = rr.ProtoMajor + t.ProtoMinor = rr.ProtoMinor + t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true) + isResponse = true + if rr.Request != nil { + t.RequestMethod = rr.Request.Method + } + default: + panic("unexpected type") + } + + // Default to HTTP/1.1 + if t.ProtoMajor == 0 && t.ProtoMinor == 0 { + t.ProtoMajor, t.ProtoMinor = 1, 1 + } + + // Transfer-Encoding: chunked, and overriding Content-Length. + if err := t.parseTransferEncoding(); err != nil { + return err + } + + realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked) + if err != nil { + return err + } + if isResponse && t.RequestMethod == "HEAD" { + if n, err := parseContentLength(t.Header["Content-Length"]); err != nil { + return err + } else { + t.ContentLength = n + } + } else { + t.ContentLength = realLength + } + + // Trailer + t.Trailer, err = fixTrailer(t.Header, t.Chunked) + if err != nil { + return err + } + + // If there is no Content-Length or chunked Transfer-Encoding on a *Response + // and the status is not 1xx, 204 or 304, then the body is unbounded. + // See RFC 7230, section 3.3. + switch msg.(type) { + case *http.Response: + if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) { + // Unbounded body. + t.Close = true + } + } + + // Prepare body reader. ContentLength < 0 means chunked encoding + // or close connection when finished, since multipart is not supported yet + switch { + case t.Chunked: + if isResponse && (noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode)) { + t.Body = NoBody + } else { + t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close} + } + case realLength == 0: + t.Body = NoBody + case realLength > 0: + t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close} + default: + // realLength < 0, i.e. "Content-Length" not mentioned in header + if t.Close { + // Close semantics (i.e. HTTP/1.0) + t.Body = &body{src: r, closing: t.Close} + } else { + // Persistent connection (i.e. HTTP/1.1) + t.Body = NoBody + } + } + + // Unify output + switch rr := msg.(type) { + case *http.Response: + rr.Body = t.Body + rr.ContentLength = t.ContentLength + if t.Chunked { + rr.TransferEncoding = []string{"chunked"} + } + rr.Close = t.Close + rr.Trailer = t.Trailer + } + + return nil +} + +// Checks whether chunked is part of the encodings stack. +func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } + +// Checks whether the encoding is explicitly "identity". +func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } + +// unsupportedTEError reports unsupported transfer-encodings. +type unsupportedTEError struct { + err string +} + +func (uste *unsupportedTEError) Error() string { + return uste.err +} + +// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header. +func (t *transferReader) parseTransferEncoding() error { + raw, present := t.Header["Transfer-Encoding"] + if !present { + return nil + } + delete(t.Header, "Transfer-Encoding") + + // Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests. 
+ if !t.protoAtLeast(1, 1) { + return nil + } + + // Like nginx, we only support a single Transfer-Encoding header field, and + // only if set to "chunked". This is one of the most security sensitive + // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it + // strict and simple. + if len(raw) != 1 { + return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)} + } + if !ascii.EqualFold(raw[0], "chunked") { + return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])} + } + + t.Chunked = true + return nil +} + +// Determine the expected body length, using RFC 7230 Section 3.3. This +// function is not a method, because ultimately it should be shared by +// ReadResponse and ReadRequest. +func fixLength(isResponse bool, status int, requestMethod string, header http.Header, chunked bool) (n int64, err error) { + isRequest := !isResponse + contentLens := header["Content-Length"] + + // Hardening against HTTP request smuggling + if len(contentLens) > 1 { + // Per RFC 7230 Section 3.3.2, prevent multiple + // Content-Length headers if they differ in value. + // If there are dups of the value, remove the dups. + // See Issue 16490. + first := textproto.TrimString(contentLens[0]) + for _, ct := range contentLens[1:] { + if first != textproto.TrimString(ct) { + return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens) + } + } + + // deduplicate Content-Length + header.Del("Content-Length") + header.Add("Content-Length", first) + + contentLens = header["Content-Length"] + } + + // Reject requests with invalid Content-Length headers. + if len(contentLens) > 0 { + n, err = parseContentLength(contentLens) + if err != nil { + return -1, err + } + } + + // Logic based on response type or status + if isResponse && noResponseBodyExpected(requestMethod) { + return 0, nil + } + if status/100 == 1 { + return 0, nil + } + switch status { + case 204, 304: + return 0, nil + } + + // According to RFC 9112, "If a message is received with both a + // Transfer-Encoding and a Content-Length header field, the Transfer-Encoding + // overrides the Content-Length. Such a message might indicate an attempt to + // perform request smuggling (Section 11.2) or response splitting (Section 11.1) + // and ought to be handled as an error. An intermediary that chooses to forward + // the message MUST first remove the received Content-Length field and process + // the Transfer-Encoding (as described below) prior to forwarding the message downstream." + // + // Chunked-encoding requests with either valid Content-Length + // headers or no Content-Length headers are accepted after removing + // the Content-Length field from header. + // + // Logic based on Transfer-Encoding + if chunked { + header.Del("Content-Length") + return -1, nil + } + + // Logic based on Content-Length + if len(contentLens) > 0 { + return n, nil + } + + header.Del("Content-Length") + + if isRequest { + // RFC 7230 neither explicitly permits nor forbids an + // entity-body on a GET request so we permit one if + // declared, but we default to 0 here (not -1 below) + // if there's no mention of a body. + // Likewise, all other request methods are assumed to have + // no body if neither Transfer-Encoding chunked nor a + // Content-Length are set. 
+ return 0, nil + } + + // Body-EOF logic based on other methods (like closing, or chunked coding) + return -1, nil +} + +// Determine whether to hang up after sending a request and body, or +// receiving a response and body +// 'header' is the request headers. +func shouldClose(major, minor int, header http.Header, removeCloseHeader bool) bool { + if major < 1 { + return true + } + + conv := header["Connection"] + hasClose := httpguts.HeaderValuesContainsToken(conv, "close") + if major == 1 && minor == 0 { + return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive") + } + + if hasClose && removeCloseHeader { + header.Del("Connection") + } + + return hasClose +} + +// Parse the trailer header. +func fixTrailer(header http.Header, chunked bool) (http.Header, error) { + vv, ok := header["Trailer"] + if !ok { + return nil, nil + } + if !chunked { + // Trailer and no chunking: + // this is an invalid use case for trailer header. + // Nevertheless, no error will be returned and we + // let users decide if this is a valid HTTP message. + // The Trailer header will be kept in Response.Header + // but not populate Response.Trailer. + // See issue #27197. + return nil, nil + } + header.Del("Trailer") + + trailer := make(http.Header) + var err error + for _, v := range vv { + foreachHeaderElement(v, func(key string) { + key = http.CanonicalHeaderKey(key) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + if err == nil { + err = badStringError("bad trailer key", key) + return + } + } + trailer[key] = nil + }) + } + if err != nil { + return nil, err + } + if len(trailer) == 0 { + return nil, nil + } + return trailer, nil +} + +// body turns a textprotoReader into a ReadCloser. +// Close ensures that the body has been fully read +// and then reads the trailer if necessary. +type body struct { + src io.Reader + hdr interface{} // non-nil (Response or Request) value means read trailer + r *bufio.Reader // underlying wire-format reader for the trailer + closing bool // is the connection to be closed after reading body? + doEarlyClose bool // whether Close should stop early + + mu sync.Mutex // guards following, and calls to Read and Close + sawEOF bool + closed bool + earlyClose bool // Close called and we didn't read to the end of src + onHitEOF func() // if non-nil, func to call when EOF is Read +} + +func (b *body) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return 0, http.ErrBodyReadAfterClose + } + return b.readLocked(p) +} + +// Must hold b.mu. +func (b *body) readLocked(p []byte) (n int, err error) { + if b.sawEOF { + return 0, io.EOF + } + n, err = b.src.Read(p) + + if err == io.EOF { + b.sawEOF = true + // Chunked case. Read the trailer. + if b.hdr != nil { + if e := b.readTrailer(); e != nil { + err = e + // Something went wrong in the trailer, we must not allow any + // further reads of any kind to succeed from body, nor any + // subsequent requests on the server connection. See + // golang.org/issue/12027 + b.sawEOF = false + b.closed = true + } + b.hdr = nil + } else { + // If the server declared the Content-Length, our body is a LimitedReader + // and we need to check whether this EOF arrived early. + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 { + err = io.ErrUnexpectedEOF + } + } + } + + // If we can return an EOF here along with the read data, do + // so. 
This is optional per the io.textprotoReader contract, but doing + // so helps the HTTP transport code recycle its connection + // earlier (since it will see this EOF itself), even if the + // client doesn't do future reads or Close. + if err == nil && n > 0 { + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 { + err = io.EOF + b.sawEOF = true + } + } + + if b.sawEOF && b.onHitEOF != nil { + b.onHitEOF() + } + + return n, err +} + +var ( + singleCRLF = []byte("\r\n") + doubleCRLF = []byte("\r\n\r\n") +) + +func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { + for peekSize := 4; ; peekSize++ { + // This loop stops when Peek returns an error, + // which it does when r's buffer has been filled. + buf, err := r.Peek(peekSize) + if bytes.HasSuffix(buf, doubleCRLF) { + return true + } + if err != nil { + break + } + } + return false +} + +var errTrailerEOF = errors.New("http: unexpected EOF reading trailer") + +func (b *body) readTrailer() error { + // The common case, since nobody uses trailers. + buf, err := b.r.Peek(2) + if bytes.Equal(buf, singleCRLF) { + b.r.Discard(2) + return nil + } + if len(buf) < 2 { + return errTrailerEOF + } + if err != nil { + return err + } + + // Make sure there's a header terminator coming up, to prevent + // a DoS with an unbounded size Trailer. It's not easy to + // slip in a LimitReader here, as textproto.NewReader requires + // a concrete *bufio.textprotoReader. Also, we can't get all the way + // back up to our conn's LimitedReader that *might* be backing + // this bufio.textprotoReader. Instead, a hack: we iteratively Peek up + // to the bufio.textprotoReader's max size, looking for a double CRLF. + // This limits the trailer to the underlying buffer size, typically 4kB. + if !seeUpcomingDoubleCRLF(b.r) { + return errors.New("http: suspiciously long trailer after chunked body") + } + + hdr, err := textproto.NewReader(b.r).ReadMIMEHeader() + if err != nil { + if err == io.EOF { + return errTrailerEOF + } + return err + } + switch rr := b.hdr.(type) { + case *http.Request: + mergeSetHeader(&rr.Trailer, http.Header(hdr)) + case *http.Response: + mergeSetHeader(&rr.Trailer, http.Header(hdr)) + } + return nil +} + +func mergeSetHeader(dst *http.Header, src http.Header) { + if *dst == nil { + *dst = src + return + } + for k, vv := range src { + (*dst)[k] = vv + } +} + +// unreadDataSizeLocked returns the number of bytes of unread input. +// It returns -1 if unknown. +// b.mu must be held. +func (b *body) unreadDataSizeLocked() int64 { + if lr, ok := b.src.(*io.LimitedReader); ok { + return lr.N + } + return -1 +} + +func (b *body) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return nil + } + var err error + switch { + case b.sawEOF: + // Already saw EOF, so no need going to look for it. + case b.hdr == nil && b.closing: + // no trailer and closing the connection next. + // no point in reading to EOF. + case b.doEarlyClose: + // Read up to maxPostHandlerReadBytes bytes of the body, looking + // for EOF (and trailers), so we can re-use this connection. + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes { + // There was a declared Content-Length, and we have more bytes remaining + // than our maxPostHandlerReadBytes tolerance. So, give up. + b.earlyClose = true + } else { + var n int64 + // Consume the body, or, which will also lead to us reading + // the trailer headers after the body, if present. 
+ n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes) + if err == io.EOF { + err = nil + } + if n == maxPostHandlerReadBytes { + b.earlyClose = true + } + } + default: + // Fully consume the body, which will also lead to us reading + // the trailer headers after the body, if present. + _, err = io.Copy(io.Discard, bodyLocked{b}) + } + b.closed = true + return err +} + +func (b *body) didEarlyClose() bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.earlyClose +} + +// bodyRemains reports whether future Read calls might +// yield data. +func (b *body) bodyRemains() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.sawEOF +} + +func (b *body) registerOnHitEOF(fn func()) { + b.mu.Lock() + defer b.mu.Unlock() + b.onHitEOF = fn +} + +// bodyLocked is an io.Reader reading from a *body when its mutex is +// already held. +type bodyLocked struct { + b *body +} + +func (bl bodyLocked) Read(p []byte) (n int, err error) { + if bl.b.closed { + return 0, http.ErrBodyReadAfterClose + } + return bl.b.readLocked(p) +} + +var httplaxContentLength = godebug.New("httplaxcontentlength") + +// parseContentLength checks that the header is valid and then trims +// whitespace. It returns -1 if no value is set otherwise the value +// if it's >= 0. +func parseContentLength(clHeaders []string) (int64, error) { + if len(clHeaders) == 0 { + return -1, nil + } + cl := textproto.TrimString(clHeaders[0]) + + // The Content-Length must be a valid numeric value. + // See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13 + if cl == "" { + if httplaxContentLength.Value() == "1" { + httplaxContentLength.IncNonDefault() + return -1, nil + } + return 0, badStringError("invalid empty Content-Length", cl) + } + n, err := strconv.ParseUint(cl, 10, 63) + if err != nil { + return 0, badStringError("bad Content-Length", cl) + } + return int64(n), nil +} + +// finishAsyncByteRead finishes reading the 1-byte sniff +// from the ContentLength==0, Body!=nil case. +type finishAsyncByteRead struct { + tw *transferWriter +} + +func (fr finishAsyncByteRead) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + rres := <-fr.tw.ByteReadCh + n, err = rres.n, rres.err + if n == 1 { + p[0] = rres.b + } + if err == nil { + err = io.EOF + } + return +} + +var ( + nopCloserType = reflect.TypeOf(io.NopCloser(nil)) + nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct { + io.Reader + io.WriterTo + }{})) +) + +// unwrapNopCloser return the underlying reader and true if r is a NopCloser +// else it return false. +func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) { + switch reflect.TypeOf(r) { + case nopCloserType, nopCloserWriterToType: + return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true + default: + return nil, false + } +} + +// isKnownInMemoryReader reports whether r is a type known to not +// block on Read. Its caller uses this as an optional optimization to +// send fewer TCP packets. +func isKnownInMemoryReader(r io.Reader) bool { + switch r.(type) { + case *bytes.Reader, *bytes.Buffer, *strings.Reader: + return true + } + if r, ok := unwrapNopCloser(r); ok { + return isKnownInMemoryReader(r) + } + if r, ok := r.(*readTrackingBody); ok { + return isKnownInMemoryReader(r.ReadCloser) + } + return false +} + +// bufioFlushWriter is an io.Writer wrapper that flushes all writes +// on its wrapped writer if it's a *bufio.Writer. 
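+// In this file it is only used for CONNECT request bodies (see writeBody),
+// where buffered bytes should reach the network promptly rather than wait
+// for the buffer to fill.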
+type bufioFlushWriter struct{ w io.Writer } + +func (fw bufioFlushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok { + ferr := bw.Flush() + if ferr != nil && err == nil { + err = ferr + } + } + return +} diff --git a/transport.go b/transport.go new file mode 100644 index 00000000..f4aa9e3b --- /dev/null +++ b/transport.go @@ -0,0 +1,3779 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP client implementation. See RFC 7230 through 7235. +// +// This is the low-level Transport implementation of http.RoundTripper. +// The high-level interface is in client.go. + +package req + +import ( + "bufio" + "compress/gzip" + "container/list" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "mime" + "net" + "net/http" + "net/http/httptrace" + "net/textproto" + "net/url" + "runtime" + "strconv" + "strings" + "sync" + "time" + _ "unsafe" + + "github.com/imroc/req/v3/http2" + "github.com/imroc/req/v3/internal/altsvcutil" + "github.com/imroc/req/v3/internal/ascii" + "github.com/imroc/req/v3/internal/common" + "github.com/imroc/req/v3/internal/compress" + "github.com/imroc/req/v3/internal/dump" + "github.com/imroc/req/v3/internal/header" + h2internal "github.com/imroc/req/v3/internal/http2" + "github.com/imroc/req/v3/internal/http3" + "github.com/imroc/req/v3/internal/netutil" + "github.com/imroc/req/v3/internal/socks" + "github.com/imroc/req/v3/internal/transport" + "github.com/imroc/req/v3/internal/util" + "github.com/imroc/req/v3/pkg/altsvc" + reqtls "github.com/imroc/req/v3/pkg/tls" + htmlcharset "golang.org/x/net/html/charset" + "golang.org/x/text/encoding/ianaindex" + + "golang.org/x/net/http/httpguts" +) + +// httpVersion represents http version. +type httpVersion string + +const ( + // h1 represents "HTTP/1.1" + h1 httpVersion = "1.1" + // h2 represents "HTTP/2.0" + h2 httpVersion = "2" + // h3 represents "HTTP/3.0" + h3 httpVersion = "3" +) + +// defaultMaxIdleConnsPerHost is the default value of Transport's +// MaxIdleConnsPerHost. +const defaultMaxIdleConnsPerHost = 2 + +// Transport is an implementation of http.RoundTripper that supports HTTP, +// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT). +// +// By default, Transport caches connections for future re-use. +// This may leave many open connections when accessing many hosts. +// This behavior can be managed using Transport's CloseIdleConnections method +// and the MaxIdleConnsPerHost and DisableKeepAlives fields. +// +// Transports should be reused instead of created as needed. +// Transports are safe for concurrent use by multiple goroutines. +// +// A Transport is a low-level primitive for making HTTP and HTTPS requests. +// For high-level functionality, such as cookies and redirects, see Client. +// +// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2 +// for HTTPS URLs, depending on whether the server supports HTTP/2, +// and how the Transport is configured. The DefaultTransport supports HTTP/2. +// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2 +// and call ConfigureTransport. See the package docs for more about HTTP/2. +// +// Responses with status codes in the 1xx range are either handled +// automatically (100 expect-continue) or ignored. 
The one +// exception is HTTP status code 101 (Switching Protocols), which is +// considered a terminal status and returned by RoundTrip. To see the +// ignored 1xx responses, use the httptrace trace package's +// ClientTrace.Got1xxResponse. +// +// Transport only retries a request upon encountering a network error +// if the request is idempotent and either has no body or has its +// Request.GetBody defined. HTTP requests are considered idempotent if +// they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or if their +// Header map contains an "Idempotency-Key" or "X-Idempotency-Key" +// entry. If the idempotency key value is a zero-length slice, the +// request is treated as idempotent but the header is not sent on the +// wire. +type Transport struct { + Headers http.Header + Cookies []*http.Cookie + + idleMu sync.Mutex + closeIdle bool // user has requested to close all idle conns + idleConn map[connectMethodKey][]*persistConn // most recently used at end + idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns + idleLRU connLRU + + reqMu sync.Mutex + reqCanceler map[*http.Request]context.CancelCauseFunc + + connsPerHostMu sync.Mutex + connsPerHost map[connectMethodKey]int + connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns + dialsInProgress wantConnQueue + + altSvcJar altsvc.Jar + pendingAltSvcs map[string]*pendingAltSvc + pendingAltSvcsMu sync.Mutex + + // Force using specific http version + forceHttpVersion httpVersion + + transport.Options + + t2 *h2internal.Transport // non-nil if http2 wired up + t3 *http3.RoundTripper + + // disableAutoDecode, if true, prevents auto detect response + // body's charset and decode it to utf-8 + disableAutoDecode bool + + // autoDecodeContentType specifies an optional function for determine + // whether the response body should been auto decode to utf-8. + // Only valid when DisableAutoDecode is true. + autoDecodeContentType func(contentType string) bool + wrappedRoundTrip http.RoundTripper + httpRoundTripWrappers []HttpRoundTripWrapper +} + +// NewTransport is an alias of T +func NewTransport() *Transport { + return T() +} + +// T create a Transport. +func T() *Transport { + t := &Transport{ + Options: transport.Options{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{NextProtos: []string{"http/1.1", "h2"}}, + }, + } + t.t2 = &h2internal.Transport{Options: &t.Options} + return t +} + +// HttpRoundTripFunc is a http.RoundTripper implementation, which is a simple function. +type HttpRoundTripFunc func(req *http.Request) (resp *http.Response, err error) + +// RoundTrip implements http.RoundTripper. +func (fn HttpRoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return fn(req) +} + +// HttpRoundTripWrapper is transport middleware function. +type HttpRoundTripWrapper func(rt http.RoundTripper) http.RoundTripper + +// HttpRoundTripWrapperFunc is transport middleware function, more convenient than HttpRoundTripWrapper. +type HttpRoundTripWrapperFunc func(rt http.RoundTripper) HttpRoundTripFunc + +func (f HttpRoundTripWrapperFunc) wrapper() HttpRoundTripWrapper { + return func(rt http.RoundTripper) http.RoundTripper { + return f(rt) + } +} + +// WrapRoundTripFunc adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper. 
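+//
+// A minimal usage sketch (hypothetical logging middleware):
+//
+//	t.WrapRoundTripFunc(func(rt http.RoundTripper) HttpRoundTripFunc {
+//		return func(req *http.Request) (*http.Response, error) {
+//			log.Printf("round trip: %s %s", req.Method, req.URL)
+//			return rt.RoundTrip(req)
+//		}
+//	})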
+func (t *Transport) WrapRoundTripFunc(funcs ...HttpRoundTripWrapperFunc) *Transport { + var wrappers []HttpRoundTripWrapper + for _, fn := range funcs { + wrappers = append(wrappers, fn.wrapper()) + } + return t.WrapRoundTrip(wrappers...) +} + +// WrapRoundTrip adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper. +func (t *Transport) WrapRoundTrip(wrappers ...HttpRoundTripWrapper) *Transport { + if len(wrappers) == 0 { + return t + } + if t.wrappedRoundTrip == nil { + t.httpRoundTripWrappers = wrappers + fn := func(req *http.Request) (*http.Response, error) { + return t.roundTrip(req) + } + t.wrappedRoundTrip = HttpRoundTripFunc(fn) + } else { + t.httpRoundTripWrappers = append(t.httpRoundTripWrappers, wrappers...) + } + + for _, w := range wrappers { + t.wrappedRoundTrip = w(t.wrappedRoundTrip) + } + return t +} + +// DisableAutoDecode disable auto-detect charset and decode to utf-8 +// (enabled by default). +func (t *Transport) DisableAutoDecode() *Transport { + t.disableAutoDecode = true + return t +} + +// EnableAutoDecode enable auto-detect charset and decode to utf-8 +// (enabled by default). +func (t *Transport) EnableAutoDecode() *Transport { + t.disableAutoDecode = false + return t +} + +// SetAutoDecodeContentTypeFunc set the function that determines whether the +// specified `Content-Type` should be auto-detected and decode to utf-8. +func (t *Transport) SetAutoDecodeContentTypeFunc(fn func(contentType string) bool) *Transport { + t.autoDecodeContentType = fn + return t +} + +// SetAutoDecodeAllContentType enable try auto-detect charset and decode all +// content type to utf-8. +func (t *Transport) SetAutoDecodeAllContentType() *Transport { + t.autoDecodeContentType = func(contentType string) bool { + return true + } + return t +} + +// SetAutoDecodeContentType set the content types that will be auto-detected and decode +// to utf-8 (e.g. "json", "xml", "html", "text"). +func (t *Transport) SetAutoDecodeContentType(contentTypes ...string) { + t.autoDecodeContentType = autoDecodeContentTypeFunc(contentTypes...) +} + +// GetMaxIdleConns returns MaxIdleConns. +func (t *Transport) GetMaxIdleConns() int { + return t.MaxIdleConns +} + +// SetMaxIdleConns set the MaxIdleConns, which controls the maximum number of idle (keep-alive) +// connections across all hosts. Zero means no limit. +func (t *Transport) SetMaxIdleConns(max int) *Transport { + t.MaxIdleConns = max + return t +} + +// SetMaxConnsPerHost set the MaxConnsPerHost, optionally limits the +// total number of connections per host, including connections in the +// dialing, active, and idle states. On limit violation, dials will block. +// +// Zero means no limit. +func (t *Transport) SetMaxConnsPerHost(max int) *Transport { + t.MaxConnsPerHost = max + return t +} + +// SetIdleConnTimeout set the IdleConnTimeout, which is the maximum +// amount of time an idle (keep-alive) connection will remain idle before +// closing itself. +// +// Zero means no limit. +func (t *Transport) SetIdleConnTimeout(timeout time.Duration) *Transport { + t.IdleConnTimeout = timeout + return t +} + +// SetTLSHandshakeTimeout set the TLSHandshakeTimeout, which specifies the +// maximum amount of time waiting to wait for a TLS handshake. +// +// Zero means no timeout. 
+func (t *Transport) SetTLSHandshakeTimeout(timeout time.Duration) *Transport { + t.TLSHandshakeTimeout = timeout + return t +} + +// SetResponseHeaderTimeout set the ResponseHeaderTimeout, if non-zero, specifies +// the amount of time to wait for a server's response headers after fully writing +// the request (including its body, if any). This time does not include the time +// to read the response body. +func (t *Transport) SetResponseHeaderTimeout(timeout time.Duration) *Transport { + t.ResponseHeaderTimeout = timeout + return t +} + +// SetExpectContinueTimeout set the ExpectContinueTimeout, if non-zero, specifies +// the amount of time to wait for a server's first response headers after fully +// writing the request headers if the request has an "Expect: 100-continue" header. +// Zero means no timeout and causes the body to be sent immediately, without waiting +// for the server to approve. +// This time does not include the time to send the request header. +func (t *Transport) SetExpectContinueTimeout(timeout time.Duration) *Transport { + t.ExpectContinueTimeout = timeout + return t +} + +// SetGetProxyConnectHeader set the GetProxyConnectHeader, which optionally specifies a func +// to return headers to send to proxyURL during a CONNECT request to the ip:port target. +// If it returns an error, the Transport's RoundTrip fails with that error. It can +// return (nil, nil) to not add headers. +// If GetProxyConnectHeader is non-nil, ProxyConnectHeader is ignored. +func (t *Transport) SetGetProxyConnectHeader(fn func(ctx context.Context, proxyURL *url.URL, target string) (http.Header, error)) *Transport { + t.GetProxyConnectHeader = fn + return t +} + +// SetProxyConnectHeader set the ProxyConnectHeader, which optionally specifies headers to +// send to proxies during CONNECT requests. +// To set the header dynamically, see SetGetProxyConnectHeader. +func (t *Transport) SetProxyConnectHeader(header http.Header) *Transport { + t.ProxyConnectHeader = header + return t +} + +// SetReadBufferSize set the ReadBufferSize, which specifies the size of the read buffer used +// when reading from the transport. +// If zero, a default (currently 4KB) is used. +func (t *Transport) SetReadBufferSize(size int) *Transport { + t.ReadBufferSize = size + return t +} + +// SetWriteBufferSize set the WriteBufferSize, which specifies the size of the write buffer used +// when writing to the transport. +// If zero, a default (currently 4KB) is used. +func (t *Transport) SetWriteBufferSize(size int) *Transport { + t.WriteBufferSize = size + return t +} + +// SetMaxResponseHeaderBytes set the MaxResponseHeaderBytes, which specifies a limit on how many +// response bytes are allowed in the server's response header. +// +// Zero means to use a default limit. +func (t *Transport) SetMaxResponseHeaderBytes(max int64) *Transport { + t.MaxResponseHeaderBytes = max + return t +} + +// SetHTTP2MaxHeaderListSize set the http2 MaxHeaderListSize, +// which is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to +// send in the initial settings frame. It is how many bytes +// of response headers are allowed. Unlike the http2 spec, zero here +// means to use a default limit (currently 10MB). If you actually +// want to advertise an unlimited value to the peer, Transport +// interprets the highest possible value here (0xffffffff or 1<<32-1) +// to mean no limit. 
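+//
+// For example, t.SetHTTP2MaxHeaderListSize(0xffffffff) advertises "unlimited"
+// to the peer, while leaving the value at zero keeps the default limit.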
+func (t *Transport) SetHTTP2MaxHeaderListSize(max uint32) *Transport { + t.t2.MaxHeaderListSize = max + return t +} + +// SetHTTP2StrictMaxConcurrentStreams set the http2 +// StrictMaxConcurrentStreams, which controls whether the +// server's SETTINGS_MAX_CONCURRENT_STREAMS should be respected +// globally. If false, new TCP connections are created to the +// server as needed to keep each under the per-connection +// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the +// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as +// a global limit and callers of RoundTrip block when needed, +// waiting for their turn. +func (t *Transport) SetHTTP2StrictMaxConcurrentStreams(strict bool) *Transport { + t.t2.StrictMaxConcurrentStreams = strict + return t +} + +// SetHTTP2ReadIdleTimeout set the http2 ReadIdleTimeout, +// which is the timeout after which a health check using ping +// frame will be carried out if no frame is received on the connection. +// Note that a ping response will is considered a received frame, so if +// there is no other traffic on the connection, the health check will +// be performed every ReadIdleTimeout interval. +// If zero, no health check is performed. +func (t *Transport) SetHTTP2ReadIdleTimeout(timeout time.Duration) *Transport { + t.t2.ReadIdleTimeout = timeout + return t +} + +// SetHTTP2PingTimeout set the http2 PingTimeout, which is the timeout +// after which the connection will be closed if a response to Ping is +// not received. +// Defaults to 15s +func (t *Transport) SetHTTP2PingTimeout(timeout time.Duration) *Transport { + t.t2.PingTimeout = timeout + return t +} + +// SetHTTP2WriteByteTimeout set the http2 WriteByteTimeout, which is the +// timeout after which the connection will be closed no data can be written +// to it. The timeout begins when data is available to write, and is +// extended whenever any bytes are written. +func (t *Transport) SetHTTP2WriteByteTimeout(timeout time.Duration) *Transport { + t.t2.WriteByteTimeout = timeout + return t +} + +// SetHTTP2SettingsFrame set the ordered http2 settings frame. +func (t *Transport) SetHTTP2SettingsFrame(settings ...http2.Setting) *Transport { + t.t2.Settings = settings + return t +} + +// SetHTTP2ConnectionFlow set the default http2 connection flow, which is the increment +// value of initial WINDOW_UPDATE frame. +func (t *Transport) SetHTTP2ConnectionFlow(flow uint32) *Transport { + t.t2.ConnectionFlow = flow + return t +} + +// SetHTTP2HeaderPriority set the header priority param. +func (t *Transport) SetHTTP2HeaderPriority(priority http2.PriorityParam) *Transport { + t.t2.HeaderPriority = priority + return t +} + +// SetHTTP2PriorityFrames set the ordered http2 priority frames. +func (t *Transport) SetHTTP2PriorityFrames(frames ...http2.PriorityFrame) *Transport { + t.t2.PriorityFrames = frames + return t +} + +// SetTLSClientConfig set the custom TLSClientConfig, which specifies the TLS configuration to +// use with tls.Client. +// If nil, the default configuration is used. +// If non-nil, HTTP/2 support may not be enabled by default. +func (t *Transport) SetTLSClientConfig(cfg *tls.Config) *Transport { + t.TLSClientConfig = cfg + return t +} + +// SetDebug set the optional debug function. +func (t *Transport) SetDebug(debugf func(format string, v ...interface{})) *Transport { + t.Debugf = debugf + return t +} + +// SetProxy set the http proxy, only valid for HTTP1 and HTTP2, which specifies a function +// to return a proxy for a given Request. 
If the function returns a non-nil error, the request +// is aborted with the provided error. +// +// The proxy type is determined by the URL scheme. "http", +// "https", and "socks5" are supported. If the scheme is empty, +// "http" is assumed. +// +// If Proxy is nil or returns a nil *URL, no proxy is used. +func (t *Transport) SetProxy(proxy func(*http.Request) (*url.URL, error)) *Transport { + t.Proxy = proxy + return t +} + +// SetDial set the custom DialContext function, only valid for HTTP1 and HTTP2, which specifies the +// dial function for creating unencrypted TCP connections. +// If it is nil, then the transport dials using package net. +// +// The dial function runs concurrently with calls to RoundTrip. +// A RoundTrip call that initiates a dial may end up using a connection dialed previously when the +// earlier connection becomes idle before the later dial function completes. +func (t *Transport) SetDial(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Transport { + t.DialContext = fn + return t +} + +// SetDialTLS set the custom DialTLSContext function, only valid for HTTP1 and HTTP2, which specifies +// an optional dial function for creating TLS connections for non-proxied HTTPS requests (the proxy will +// not work if this is set). +// +// If it is nil, DialContext and TLSClientConfig are used. +// +// If it is set, the function set in SetDial is not used for HTTPS requests and the TLSClientConfig +// and TLSHandshakeTimeout are ignored. The returned net.Conn is assumed to already be past the TLS handshake. +func (t *Transport) SetDialTLS(fn func(ctx context.Context, network, addr string) (net.Conn, error)) *Transport { + t.DialTLSContext = fn + return t +} + +// SetTLSHandshake set the custom tls handshake function, only valid for HTTP1 and HTTP2, not HTTP3. +// It specifies an optional function for the tls handshake that works even if a proxy is set, and can be +// used to customize the tls fingerprint. +func (t *Transport) SetTLSHandshake(fn func(ctx context.Context, addr string, plainConn net.Conn) (conn net.Conn, tlsState *tls.ConnectionState, err error)) *Transport { + t.TLSHandshakeContext = fn + return t +} + +type pendingAltSvc struct { + CurrentIndex int + Entries []*altsvc.AltSvc + Mu sync.Mutex + LastTime time.Time + Transport http.RoundTripper +} + +// EnableForceHTTP1 enable force using HTTP1 (disabled by default). +func (t *Transport) EnableForceHTTP1() *Transport { + t.forceHttpVersion = h1 + return t +} + +// EnableForceHTTP2 enable force using HTTP2 for https requests +// (disabled by default). +func (t *Transport) EnableForceHTTP2() *Transport { + t.forceHttpVersion = h2 + return t +} + +// EnableH2C enables HTTP2 over TCP without TLS. +func (t *Transport) EnableH2C() *Transport { + t.Options.EnableH2C = true + t.t2.AllowHTTP = true + t.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return net.Dial(network, addr) + } + return t +} + +// DisableH2C disables HTTP2 over TCP without TLS. +func (t *Transport) DisableH2C() *Transport { + t.Options.EnableH2C = false + t.t2.AllowHTTP = false + t.t2.DialTLSContext = nil + return t +} + +// EnableForceHTTP3 enable force using HTTP3 for https requests +// (disabled by default). +func (t *Transport) EnableForceHTTP3() *Transport { + t.EnableHTTP3() + if t.t3 != nil { + t.forceHttpVersion = h3 + } + return t +} + +// DisableForceHttpVersion disable force using specified http +// version (disabled by default).
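+//
+// An illustrative sketch of how the force-version switches above fit together
+// (hedged; t is assumed to be a *Transport value from this package):
+//
+//	t.EnableForceHTTP2() // later dials fail if the server cannot negotiate h2 (see newHttp2NotSupportedError below)
+//	// ... issue requests ...
+//	t.DisableForceHttpVersion() // return to automatic protocol selection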
+func (t *Transport) DisableForceHttpVersion() *Transport { + t.forceHttpVersion = "" + return t +} + +func (t *Transport) DisableHTTP3() { + t.altSvcJar = nil + t.pendingAltSvcs = nil + t.t3 = nil +} + +func (t *Transport) EnableHTTP3() { + if t.t3 != nil { + return + } + + v := runtime.Version() + ss := strings.Split(v, ".") + + if len(ss) < 2 || ss[0] != "go1" { + if t.Debugf != nil { + t.Debugf("bad go version format: %s", v) + } + return + } + minorVersion, err := strconv.Atoi(ss[1]) + if err != nil { + if t.Debugf != nil { + t.Debugf("bad go minor version: %s", v) + } + return + } + if minorVersion < 22 || minorVersion > 23 { + if t.Debugf != nil { + t.Debugf("%s does not support http3", v) + } + return + } + + if t.altSvcJar == nil { + t.altSvcJar = altsvc.NewAltSvcJar() + } + if t.pendingAltSvcs == nil { + t.pendingAltSvcs = make(map[string]*pendingAltSvc) + } + t3 := &http3.RoundTripper{ + Options: &t.Options, + } + t.t3 = t3 +} + +type wrapResponseBodyKeyType int + +const wrapResponseBodyKey wrapResponseBodyKeyType = iota + +type wrapResponseBodyFunc func(rc io.ReadCloser) io.ReadCloser + +func (t *Transport) handleResponseBody(res *http.Response, req *http.Request) { + if wrap, ok := req.Context().Value(wrapResponseBodyKey).(wrapResponseBodyFunc); ok { + t.wrapResponseBody(res, wrap) + } + t.autoDecodeResponseBody(res) + dump.WrapResponseBodyIfNeeded(res, req, t.Dump) +} + +var allowedProtocols = map[string]bool{ + "h3": true, +} + +func (t *Transport) handleAltSvc(req *http.Request, value string) { + addr := netutil.AuthorityKey(req.URL) + as := t.altSvcJar.GetAltSvc(addr) + if as != nil { + return + } + + t.pendingAltSvcsMu.Lock() + defer t.pendingAltSvcsMu.Unlock() + _, ok := t.pendingAltSvcs[addr] + if ok { + return + } + ass, err := altsvcutil.ParseHeader(value) + if err != nil { + if t.Debugf != nil { + t.Debugf("failed to parse alt-svc header: %s", err.Error()) + } + return + } + var entries []*altsvc.AltSvc + for _, a := range ass { + if allowedProtocols[a.Protocol] { + entries = append(entries, a) + } + } + if len(entries) > 0 { + pas := &pendingAltSvc{ + Entries: entries, + } + t.pendingAltSvcs[addr] = pas + go t.handlePendingAltSvc(req.URL, pas) + } +} + +func (t *Transport) handlePendingAltSvc(u *url.URL, pas *pendingAltSvc) { + for i := pas.CurrentIndex; i < len(pas.Entries); i++ { + switch pas.Entries[i].Protocol { + case "h3": // only support h3 in alt-svc for now + u2 := altsvcutil.ConvertURL(pas.Entries[i], u) + hostname := u2.Host + err := t.t3.AddConn(context.Background(), hostname) + if err != nil { + if t.Debugf != nil { + t.Debugf("failed to get http3 connection: %s", err.Error()) + } + } else { + pas.CurrentIndex = i + pas.Transport = t.t3 + if t.Debugf != nil { + t.Debugf("detected that the server %s supports http3, will try to use http3 protocol in subsequent requests", hostname) + } + return + } + } + } +} + +func (t *Transport) wrapResponseBody(res *http.Response, wrap wrapResponseBodyFunc) { + switch b := res.Body.(type) { + case *gzipReader: + b.body.body = wrap(b.body.body) + case compress.CompressReader: + b.SetUnderlyingBody(wrap(b.GetUnderlyingBody())) + default: + res.Body = wrap(res.Body) + } +} + +func (t *Transport) autoDecodeResponseBody(res *http.Response) { + if t.disableAutoDecode || res.Header.Get("Content-Encoding") != "" { // body is still compressed; leave it untouched + return + } + contentType := res.Header.Get("Content-Type") + var shouldDecode func(contentType string) bool + if t.autoDecodeContentType != nil { + shouldDecode = t.autoDecodeContentType + } else { + shouldDecode =
autoDecodeText + } + if !shouldDecode(contentType) { + return + } + _, params, err := mime.ParseMediaType(contentType) + if err != nil { + if t.Debugf != nil { + t.Debugf("failed to parse content type %q: %v", contentType, err) + } + } else if charset, ok := params["charset"]; ok { + charset = strings.ToLower(charset) + if strings.Contains(charset, "utf-8") || strings.Contains(charset, "utf8") { // do not decode utf-8 + return + } + enc, _ := htmlcharset.Lookup(charset) + if enc == nil { + enc, err = ianaindex.MIME.Encoding(charset) + if err != nil || enc == nil { + if t.Debugf != nil { + t.Debugf("ignore charset %s which is detected in Content-Type but not supported", charset) + } + return + } + } + if t.Debugf != nil { + t.Debugf("charset %s detected in Content-Type, auto-decode to utf-8", charset) + } + decodeReader := enc.NewDecoder().Reader(res.Body) + res.Body = &decodeReaderCloser{res.Body, decodeReader} + return + } + res.Body = newAutoDecodeReadCloser(res.Body, t) +} + +func (t *Transport) writeBufferSize() int { + if t.WriteBufferSize > 0 { + return t.WriteBufferSize + } + return 4 << 10 +} + +func (t *Transport) readBufferSize() int { + if t.ReadBufferSize > 0 { + return t.ReadBufferSize + } + return 4 << 10 +} + +// Clone returns a deep copy of t's exported fields. +func (t *Transport) Clone() *Transport { + tt := &Transport{ + Headers: t.Headers.Clone(), + Cookies: cloneSlice(t.Cookies), + Options: t.Options.Clone(), + disableAutoDecode: t.disableAutoDecode, + autoDecodeContentType: t.autoDecodeContentType, + forceHttpVersion: t.forceHttpVersion, + httpRoundTripWrappers: t.httpRoundTripWrappers, + } + if len(tt.httpRoundTripWrappers) > 0 { // clone transport middleware + fn := func(req *http.Request) (*http.Response, error) { + return tt.roundTrip(req) + } + tt.wrappedRoundTrip = HttpRoundTripFunc(fn) + for _, w := range tt.httpRoundTripWrappers { + tt.wrappedRoundTrip = w(tt.wrappedRoundTrip) + } + } + if t.t2 != nil { + tt.t2 = &h2internal.Transport{ + Options: &tt.Options, + MaxHeaderListSize: t.t2.MaxHeaderListSize, + StrictMaxConcurrentStreams: t.t2.StrictMaxConcurrentStreams, + ReadIdleTimeout: t.t2.ReadIdleTimeout, + PingTimeout: t.t2.PingTimeout, + WriteByteTimeout: t.t2.WriteByteTimeout, + ConnectionFlow: t.t2.ConnectionFlow, + Settings: cloneSlice(t.t2.Settings), + HeaderPriority: t.t2.HeaderPriority, + PriorityFrames: cloneSlice(t.t2.PriorityFrames), + } + } + if t.t3 != nil { + tt.EnableHTTP3() + } + return tt +} + +// EnableDump enables the dump for all requests with specified dump options. +func (t *Transport) EnableDump(opt *DumpOptions) { + dump := newDumper(opt) + t.Dump = dump + go dump.Start() +} + +// DisableDump disables the dump. +func (t *Transport) DisableDump() { + if t.Dump != nil { + t.Dump.Stop() + t.Dump = nil + } +} + +func (t *Transport) hasCustomTLSDialer() bool { + return t.DialTLSContext != nil +} + +// transportRequest is a wrapper around a *Request that adds +// optional extra headers to write and stores any error to return +// from roundTrip. 
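+//
+// roundTrip below builds a fresh transportRequest for every attempt (see the
+// retry loop). A hedged sketch of that per-attempt setup, using the names that
+// appear in the loop body:
+//
+//	treq := &transportRequest{Request: req, trace: trace, ctx: ctx, cancel: cancel}
+//	cm, err := t.connectMethodForRequest(treq) // resolve proxy, scheme and target addr for this attempt
+//	pconn, err := t.getConn(treq, cm)          // reuse an idle conn or dial a new one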
+type transportRequest struct { + *http.Request // original request, not to be mutated + extra http.Header // extra headers to write, or nil + trace *httptrace.ClientTrace // optional + + ctx context.Context // canceled when we are done with the request + cancel context.CancelCauseFunc + + mu sync.Mutex // guards err + err error // first setError value for mapRoundTripError to consider +} + +func (tr *transportRequest) extraHeaders() http.Header { + if tr.extra == nil { + tr.extra = make(http.Header) + } + return tr.extra +} + +func (tr *transportRequest) setError(err error) { + tr.mu.Lock() + if tr.err == nil { + tr.err = err + } + tr.mu.Unlock() +} + +func (t *Transport) roundTripAltSvc(req *http.Request, as *altsvc.AltSvc) (resp *http.Response, err error) { + r := req.Clone(req.Context()) + r.URL = altsvcutil.ConvertURL(as, req.URL) + switch as.Protocol { + case "h3": + resp, err = t.t3.RoundTrip(r) + case "h2": + resp, err = t.t2.RoundTrip(r) + default: + // impossible! + panic(fmt.Sprintf("unknown protocol %q", as.Protocol)) + } + return +} + +func (t *Transport) checkAltSvc(req *http.Request) (resp *http.Response, err error) { + if t.altSvcJar == nil { + return + } + addr := netutil.AuthorityKey(req.URL) + pas, ok := t.pendingAltSvcs[addr] + if ok && pas.Transport != nil { + pas.Mu.Lock() + if pas.Transport != nil { + pas.LastTime = time.Now() + r := req.Clone(req.Context()) + r.URL = altsvcutil.ConvertURL(pas.Entries[pas.CurrentIndex], req.URL) + resp, err = pas.Transport.RoundTrip(r) + if err != nil { + pas.Transport = nil + if pas.CurrentIndex+1 < len(pas.Entries) { + pas.CurrentIndex++ + go t.handlePendingAltSvc(req.URL, pas) + } + } else { + t.altSvcJar.SetAltSvc(addr, pas.Entries[pas.CurrentIndex]) + delete(t.pendingAltSvcs, addr) + } + } + pas.Mu.Unlock() + return + } + if as := t.altSvcJar.GetAltSvc(addr); as != nil { + return t.roundTripAltSvc(req, as) + } + return +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("field value for %q", k) + } + } + } + return "" +} + +// roundTrip implements a http.RoundTripper over HTTP. +func (t *Transport) roundTrip(req *http.Request) (resp *http.Response, err error) { + ctx := req.Context() + trace := httptrace.ContextClientTrace(ctx) + + if req.URL == nil { + closeBody(req) + return nil, errors.New("http: nil Request.URL") + } + + resp, err = t.checkAltSvc(req) + if err != nil || resp != nil { + return + } + + scheme := req.URL.Scheme + isHTTP := scheme == "http" || scheme == "https" + + if isHTTP { + // Validate the outgoing headers. + if err := validateHeaders(req.Header); err != "" { + closeBody(req) + return nil, fmt.Errorf("net/http: invalid header %s", err) + } + + // Validate the outgoing trailers too. 
+ if err := validateHeaders(req.Trailer); err != "" { + closeBody(req) + return nil, fmt.Errorf("net/http: invalid trailer %s", err) + } + } + + if req.Header == nil { + req.Header = make(http.Header) + } + + if t.forceHttpVersion != "" { + switch t.forceHttpVersion { + case h3: + return t.t3.RoundTrip(req) + case h2: + return t.t2.RoundTrip(req) + } + } + + origReq := req + req = setupRewindBody(req) + + if scheme == "https" && t.forceHttpVersion != h1 { + resp, err := t.t2.RoundTripOnlyCachedConn(req) + if err != h2internal.ErrNoCachedConn { + return resp, err + } + req, err = rewindBody(req) + if err != nil { + return nil, err + } + if t.t3 != nil { + resp, err = t.t3.RoundTripOnlyCachedConn(req) + if err != http3.ErrNoCachedConn { + return resp, err + } + req, err = rewindBody(req) + if err != nil { + return nil, err + } + } + } + + if !isHTTP { + closeBody(req) + return nil, badStringError("unsupported protocol scheme", scheme) + } + if req.Method != "" && !validMethod(req.Method) { + closeBody(req) + return nil, fmt.Errorf("net/http: invalid method %q", req.Method) + } + if req.URL.Host == "" { + closeBody(req) + return nil, errors.New("http: no Host in request URL") + } + + // Transport request context. + // + // If RoundTrip returns an error, it cancels this context before returning. + // + // If RoundTrip returns no error: + // - For an HTTP/1 request, persistConn.readLoop cancels this context + // after reading the request body. + // - For an HTTP/2 request, RoundTrip cancels this context after the HTTP/2 + // RoundTripper returns. + ctx, cancel := context.WithCancelCause(req.Context()) + + // Convert Request.Cancel into context cancelation. + if origReq.Cancel != nil { + go awaitLegacyCancel(ctx, cancel, origReq) + } + + // Convert Transport.CancelRequest into context cancelation. + // + // This is lamentably expensive. CancelRequest has been deprecated for a long time + // and doesn't work on HTTP/2 requests. Perhaps we should drop support for it entirely. + cancel = t.prepareTransportCancel(origReq, cancel) + + defer func() { + if err != nil { + cancel(err) + } + }() + + for { + select { + case <-ctx.Done(): + closeBody(req) + return nil, context.Cause(ctx) + default: + } + + // treq gets modified by roundTrip, so we need to recreate for each retry. + treq := &transportRequest{Request: req, trace: trace, ctx: ctx, cancel: cancel} + cm, err := t.connectMethodForRequest(treq) + if err != nil { + closeBody(req) + return nil, err + } + + // Get the cached or newly-created connection to either the + // host (for http or https), the http proxy, or the http proxy + // pre-CONNECTed to https server. In any case, we'll be ready + // to send it requests. + pconn, err := t.getConn(treq, cm) + if err != nil { + closeBody(req) + return nil, err + } + + var resp *http.Response + if t.forceHttpVersion != h1 && pconn.alt != nil { + // HTTP/2 path. + resp, err = pconn.alt.RoundTrip(req) + } else { + resp, err = pconn.roundTrip(treq) + } + if err == nil { + if pconn.alt != nil { + // HTTP/2 requests are not cancelable with CancelRequest, + // so we have no further need for the request context. + // + // On the HTTP/1 path, roundTrip takes responsibility for + // canceling the context after the response body is read. + cancel(errRequestDone) + } + + resp.Request = origReq + return resp, nil + } + + // Failed. Clean up and determine whether to retry. 
+ if h2internal.IsNoCachedConnError(err) { + if t.removeIdleConn(pconn) { + t.decConnsPerHost(pconn.cacheKey) + } + } else if !pconn.shouldRetryRequest(req, err) { + // Issue 16465: return underlying net.Conn.Read error from peek, + // as we've historically done. + if e, ok := err.(nothingWrittenError); ok { + err = e.error + } + if e, ok := err.(transportReadFromServerError); ok { + err = e.err + } + if b, ok := req.Body.(*readTrackingBody); ok && !b.didClose { + // Issue 49621: Close the request body if pconn.roundTrip + // didn't do so already. This can happen if the pconn + // write loop exits without reading the write request. + closeBody(req) + } + return nil, err + } + testHookRoundTripRetried() + + // Rewind the body if we're able to. + req, err = rewindBody(req) + if err != nil { + return nil, err + } + } +} + +func awaitLegacyCancel(ctx context.Context, cancel context.CancelCauseFunc, req *http.Request) { + select { + case <-req.Cancel: + cancel(common.ErrRequestCanceled) + case <-ctx.Done(): + } +} + +var errCannotRewind = errors.New("net/http: cannot rewind body after connection loss") + +type readTrackingBody struct { + io.ReadCloser + didRead bool + didClose bool +} + +func (r *readTrackingBody) Read(data []byte) (int, error) { + r.didRead = true + return r.ReadCloser.Read(data) +} + +func (r *readTrackingBody) Close() error { + r.didClose = true + return r.ReadCloser.Close() +} + +// setupRewindBody returns a new request with a custom body wrapper +// that can report whether the body needs rewinding. +// This lets rewindBody avoid an error result when the request +// does not have GetBody but the body hasn't been read at all yet. +func setupRewindBody(req *http.Request) *http.Request { + if req.Body == nil || req.Body == NoBody { + return req + } + newReq := *req + newReq.Body = &readTrackingBody{ReadCloser: req.Body} + return &newReq +} + +// rewindBody returns a new request with the body rewound. +// It returns req unmodified if the body does not need rewinding. +// rewindBody takes care of closing req.Body when appropriate +// (in all cases except when rewindBody returns req unmodified). +func rewindBody(req *http.Request) (rewound *http.Request, err error) { + if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) { + return req, nil // nothing to rewind + } + if !req.Body.(*readTrackingBody).didClose { + closeBody(req) + } + if req.GetBody == nil { + return nil, errCannotRewind + } + body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = &readTrackingBody{ReadCloser: body} + return &newReq, nil +} + +// shouldRetryRequest reports whether we should retry sending a failed +// HTTP request on a new connection. The non-nil input error is the +// error from roundTrip. +func (pc *persistConn) shouldRetryRequest(req *http.Request, err error) bool { + if h2internal.IsNoCachedConnError(err) { + // Issue 16582: if the user started a bunch of + // requests at once, they can all pick the same conn + // and violate the server's max concurrent streams. + // Instead, match the HTTP/1 behavior for now and dial + // again to get a new TCP connection, rather than failing + // this request. + return true + } + if err == errMissingHost { + // User error. + return false + } + if !pc.isReused() { + // This was a fresh connection. There's no reason the server + // should've hung up on us. 
+ // + // Also, if we retried now, we could loop forever + // creating new connections and retrying if the server + // is just hanging up on us because it doesn't like + // our request (as opposed to sending an error). + return false + } + if _, ok := err.(nothingWrittenError); ok { + // We never wrote anything, so it's safe to retry, if there's no body or we + // can "rewind" the body with GetBody. + return outgoingLength(req) == 0 || req.GetBody != nil + } + if !isReplayable(req) { + // Don't retry non-idempotent requests. + return false + } + if _, ok := err.(transportReadFromServerError); ok { + // We got some non-EOF net.Conn.Read failure reading + // the 1st response byte from the server. + return true + } + if err == errServerClosedIdle { + // The server replied with io.EOF while we were trying to + // read the response. Probably an unfortunately keep-alive + // timeout, just as the client was writing a request. + return true + } + return false // conservatively +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle in +// a "keep-alive" state. It does not interrupt any connections currently +// in use. +func (t *Transport) CloseIdleConnections() { + t.idleMu.Lock() + m := t.idleConn + t.idleConn = nil + t.closeIdle = true // close newly idle connections + t.idleLRU = connLRU{} + t.idleMu.Unlock() + for _, conns := range m { + for _, pconn := range conns { + pconn.close(errCloseIdleConns) + } + } + t.connsPerHostMu.Lock() + t.dialsInProgress.all(func(w *wantConn) { + if w.cancelCtx != nil && !w.waiting() { + w.cancelCtx() + } + }) + t.connsPerHostMu.Unlock() + + if t2 := t.t2; t2 != nil { + t2.CloseIdleConnections() + } +} + +// prepareTransportCancel sets up state to convert Transport.CancelRequest into context cancelation. +func (t *Transport) prepareTransportCancel(req *http.Request, origCancel context.CancelCauseFunc) context.CancelCauseFunc { + // Historically, RoundTrip has not modified the Request in any way. + // We could avoid the need to keep a map of all in-flight requests by adding + // a field to the Request containing its cancel func, and setting that field + // while the request is in-flight. Callers aren't supposed to reuse a Request + // until after the response body is closed, so this wouldn't violate any + // concurrency guarantees. + cancel := func(err error) { + origCancel(err) + t.reqMu.Lock() + delete(t.reqCanceler, req) + t.reqMu.Unlock() + } + t.reqMu.Lock() + if t.reqCanceler == nil { + t.reqCanceler = make(map[*http.Request]context.CancelCauseFunc) + } + t.reqCanceler[req] = cancel + t.reqMu.Unlock() + return cancel +} + +// CancelRequest cancels an in-flight request by closing its connection. +// CancelRequest should only be called after [Transport.RoundTrip] has returned. +// +// Deprecated: Use [Request.WithContext] to create a request with a +// cancelable context instead. CancelRequest cannot cancel HTTP/2 +// requests. This may become a no-op in a future release of Go. +func (t *Transport) CancelRequest(req *http.Request) { + t.reqMu.Lock() + cancel := t.reqCanceler[req] + t.reqMu.Unlock() + if cancel != nil { + cancel(common.ErrRequestCanceled) + } +} + +// resetProxyConfig is used by tests. 
+func resetProxyConfig() { +} + +func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) { + cm.targetScheme = treq.URL.Scheme + cm.targetAddr = canonicalAddr(treq.URL) + if t.Proxy != nil { + cm.proxyURL, err = t.Proxy(treq.Request) + } + cm.onlyH1 = t.forceHttpVersion == h1 || requestRequiresHTTP1(treq.Request) + return cm, err +} + +// proxyAuth returns the Proxy-Authorization header to set +// on requests, if applicable. +func (cm *connectMethod) proxyAuth() string { + if cm.proxyURL == nil { + return "" + } + if u := cm.proxyURL.User; u != nil { + username := u.Username() + password, _ := u.Password() + return "Basic " + basicAuth(username, password) + } + return "" +} + +// error values for debugging and testing, not seen by users. +var ( + errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled") + errConnBroken = errors.New("http: putIdleConn: connection is in bad state") + errCloseIdle = errors.New("http: putIdleConn: CloseIdleConnections was called") + errTooManyIdle = errors.New("http: putIdleConn: too many idle connections") + errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host") + errCloseIdleConns = errors.New("http: CloseIdleConnections called") + errReadLoopExiting = errors.New("http: persistConn.readLoop exiting") + errIdleConnTimeout = errors.New("http: idle connection timeout") + + // errServerClosedIdle is not seen by users for idempotent requests, but may be + // seen by a user if the server shuts down an idle connection and sends its FIN + // in flight with already-written POST body bytes from the client. + // See https://github.com/golang/go/issues/19943#issuecomment-355607646 + errServerClosedIdle = errors.New("http: server closed idle connection") +) + +// transportReadFromServerError is used by Transport.readLoop when the +// 1 byte peek read fails and we're actually anticipating a response. +// Usually this is just due to the inherent keep-alive shut down race, +// where the server closed the connection at the same time the client +// wrote. The underlying err field is usually io.EOF or some +// ECONNRESET sort of thing which varies by platform. But it might be +// the user's custom net.Conn.Read error too, so we carry it along for +// them to return from Transport.RoundTrip. +type transportReadFromServerError struct { + err error +} + +func (e transportReadFromServerError) Unwrap() error { return e.err } + +func (e transportReadFromServerError) Error() string { + return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err) +} + +func (t *Transport) putOrCloseIdleConn(pconn *persistConn) { + if err := t.tryPutIdleConn(pconn); err != nil { + pconn.close(err) + } +} + +func (t *Transport) maxIdleConnsPerHost() int { + if v := t.MaxIdleConnsPerHost; v != 0 { + return v + } + return defaultMaxIdleConnsPerHost +} + +// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting +// a new request. +// If pconn is no longer needed or not in a good state, tryPutIdleConn returns +// an error explaining why it wasn't registered. +// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that. 
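+//
+// The knobs that govern this idle pool are the exported fields referenced below
+// (a hedged sketch; t is assumed to be a *Transport value from this package):
+//
+//	t.DisableKeepAlives = false           // pooling requires keep-alives
+//	t.MaxIdleConnsPerHost = 8             // per-host cap checked by tryPutIdleConn
+//	t.MaxIdleConns = 64                   // global cap enforced through the idle LRU
+//	t.IdleConnTimeout = 90 * time.Second  // idle conns are reaped by closeConnIfStillIdle after this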
+func (t *Transport) tryPutIdleConn(pconn *persistConn) error { + if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { + return errKeepAlivesDisabled + } + if pconn.isBroken() { + return errConnBroken + } + pconn.markReused() + + t.idleMu.Lock() + defer t.idleMu.Unlock() + + // HTTP/2 (pconn.alt != nil) connections do not come out of the idle list, + // because multiple goroutines can use them simultaneously. + // If this is an HTTP/2 connection being “returned,” we're done. + if pconn.alt != nil && t.idleLRU.m[pconn] != nil { + return nil + } + + // Deliver pconn to goroutine waiting for idle connection, if any. + // (They may be actively dialing, but this conn is ready first. + // Chrome calls this socket late binding. + // See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.) + key := pconn.cacheKey + if q, ok := t.idleConnWait[key]; ok { + done := false + if pconn.alt == nil { + // HTTP/1. + // Loop over the waiting list until we find a w that isn't done already, and hand it pconn. + for q.len() > 0 { + w := q.popFront() + if w.tryDeliver(pconn, nil, time.Time{}) { + done = true + break + } + } + } else { + // HTTP/2. + // Can hand the same pconn to everyone in the waiting list, + // and we still won't be done: we want to put it in the idle + // list unconditionally, for any future clients too. + for q.len() > 0 { + w := q.popFront() + w.tryDeliver(pconn, nil, time.Time{}) + } + } + if q.len() == 0 { + delete(t.idleConnWait, key) + } else { + t.idleConnWait[key] = q + } + if done { + return nil + } + } + + if t.closeIdle { + return errCloseIdle + } + if t.idleConn == nil { + t.idleConn = make(map[connectMethodKey][]*persistConn) + } + idles := t.idleConn[key] + if len(idles) >= t.maxIdleConnsPerHost() { + return errTooManyIdleHost + } + for _, exist := range idles { + if exist == pconn { + log.Fatalf("dup idle pconn %p in freelist", pconn) + } + } + t.idleConn[key] = append(idles, pconn) + t.idleLRU.add(pconn) + if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns { + oldest := t.idleLRU.removeOldest() + oldest.close(errTooManyIdle) + t.removeIdleConnLocked(oldest) + } + + // Set idle timer, but only for HTTP/1 (pconn.alt == nil). + // The HTTP/2 implementation manages the idle timer itself + // (see idleConnTimeout in h2_bundle.go). + if t.IdleConnTimeout > 0 && pconn.alt == nil { + if pconn.idleTimer != nil { + pconn.idleTimer.Reset(t.IdleConnTimeout) + } else { + pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle) + } + } + pconn.idleAt = time.Now() + return nil +} + +// queueForIdleConn queues w to receive the next idle connection for w.cm. +// As an optimization hint to the caller, queueForIdleConn reports whether +// it successfully delivered an already-idle connection. +func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) { + if t.DisableKeepAlives { + return false + } + + t.idleMu.Lock() + defer t.idleMu.Unlock() + + // Stop closing connections that become idle - we might want one. + // (That is, undo the effect of t.CloseIdleConnections.) + t.closeIdle = false + + if w == nil { + // Happens in test hook. + return false + } + + // If IdleConnTimeout is set, calculate the oldest + // persistConn.idleAt time we're willing to use a cached idle + // conn. + var oldTime time.Time + if t.IdleConnTimeout > 0 { + oldTime = time.Now().Add(-t.IdleConnTimeout) + } + + // Look for most recently-used idle connection.
+ if list, ok := t.idleConn[w.key]; ok { + stop := false + delivered := false + for len(list) > 0 && !stop { + pconn := list[len(list)-1] + + // See whether this connection has been idle too long, considering + // only the wall time (the Round(0)), in case this is a laptop or VM + // coming out of suspend with previously cached idle connections. + tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime) + if tooOld { + // Async cleanup. Launch in its own goroutine (as if a + // time.AfterFunc called it); it acquires idleMu, which we're + // holding, and does a synchronous net.Conn.Close. + go pconn.closeConnIfStillIdle() + } + if pconn.isBroken() || tooOld { + // If either persistConn.readLoop has marked the connection + // broken, but Transport.removeIdleConn has not yet removed it + // from the idle list, or if this persistConn is too old (it was + // idle too long), then ignore it and look for another. In both + // cases it's already in the process of being closed. + list = list[:len(list)-1] + continue + } + delivered = w.tryDeliver(pconn, nil, pconn.idleAt) + if delivered { + if pconn.alt != nil { + // HTTP/2: multiple clients can share pconn. + // Leave it in the list. + } else { + // HTTP/1: only one client can use pconn. + // Remove it from the list. + t.idleLRU.remove(pconn) + list = list[:len(list)-1] + } + } + stop = true + } + if len(list) > 0 { + t.idleConn[w.key] = list + } else { + delete(t.idleConn, w.key) + } + if stop { + return delivered + } + } + + // Register to receive next connection that becomes idle. + if t.idleConnWait == nil { + t.idleConnWait = make(map[connectMethodKey]wantConnQueue) + } + q := t.idleConnWait[w.key] + q.cleanFrontNotWaiting() + q.pushBack(w) + t.idleConnWait[w.key] = q + return false +} + +// removeIdleConn marks pconn as dead. +func (t *Transport) removeIdleConn(pconn *persistConn) bool { + t.idleMu.Lock() + defer t.idleMu.Unlock() + return t.removeIdleConnLocked(pconn) +} + +// t.idleMu must be held. +func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool { + if pconn.idleTimer != nil { + pconn.idleTimer.Stop() + } + t.idleLRU.remove(pconn) + key := pconn.cacheKey + pconns := t.idleConn[key] + var removed bool + switch len(pconns) { + case 0: + // Nothing + case 1: + if pconns[0] == pconn { + delete(t.idleConn, key) + removed = true + } + default: + for i, v := range pconns { + if v != pconn { + continue + } + // Slide down, keeping most recently-used + // conns at the end. + copy(pconns[i:], pconns[i+1:]) + t.idleConn[key] = pconns[:len(pconns)-1] + removed = true + break + } + } + return removed +} + +var zeroDialer net.Dialer + +func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) { + if t.DialContext != nil { + c, err := t.DialContext(ctx, network, addr) + if c == nil && err == nil { + err = errors.New("net/http: Transport.DialContext hook returned (nil, nil)") + } + return c, err + } + return zeroDialer.DialContext(ctx, network, addr) +} + +// A wantConn records state about a wanted connection +// (that is, an active call to getConn). +// The conn may be gotten by dialing or by finding an idle connection, +// or a cancellation may make the conn no longer wanted. +// These three options are racing against each other and use +// wantConn to coordinate and agree about the winning outcome. 
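+//
+// A hedged sketch of that race, using the names that appear in getConn further
+// down in this file:
+//
+//	w := &wantConn{cm: cm, key: cm.key(), ctx: dialCtx, cancelCtx: dialCancel, result: make(chan connOrError, 1)}
+//	if delivered := t.queueForIdleConn(w); !delivered {
+//		t.queueForDial(w) // may start a dial; whichever side finishes first calls w.tryDeliver
+//	}
+//	r := <-w.result // an idle conn, a freshly dialed conn, or an error - first one wins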
+type wantConn struct { + cm connectMethod + key connectMethodKey // cm.key() + + // hooks for testing to know when dials are done + // beforeDial is called in the getConn goroutine when the dial is queued. + // afterDial is called when the dial is completed or canceled. + beforeDial func() + afterDial func() + + mu sync.Mutex // protects ctx, done and sending of the result + ctx context.Context // context for dial, cleared after delivered or canceled + cancelCtx context.CancelFunc + done bool // true after delivered or canceled + result chan connOrError // channel to deliver connection or error +} + +type connOrError struct { + pc *persistConn + err error + idleAt time.Time +} + +// waiting reports whether w is still waiting for an answer (connection or error). +func (w *wantConn) waiting() bool { + w.mu.Lock() + defer w.mu.Unlock() + + return !w.done +} + +// getCtxForDial returns context for dial or nil if connection was delivered or canceled. +func (w *wantConn) getCtxForDial() context.Context { + w.mu.Lock() + defer w.mu.Unlock() + + return w.ctx +} + +// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded. +func (w *wantConn) tryDeliver(pc *persistConn, err error, idleAt time.Time) bool { + w.mu.Lock() + defer w.mu.Unlock() + + if w.done { + return false + } + if (pc == nil) == (err == nil) { + panic("net/http: internal error: misuse of tryDeliver") + } + w.ctx = nil + w.done = true + + w.result <- connOrError{pc: pc, err: err, idleAt: idleAt} + close(w.result) + + return true +} + +// cancel marks w as no longer wanting a result (for example, due to cancellation). +// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn. +func (w *wantConn) cancel(t *Transport, err error) { + w.mu.Lock() + var pc *persistConn + if w.done { + if r, ok := <-w.result; ok { + pc = r.pc + } + } else { + close(w.result) + } + w.ctx = nil + w.done = true + w.mu.Unlock() + + if pc != nil { + t.putOrCloseIdleConn(pc) + } +} + +// A wantConnQueue is a queue of wantConns. +type wantConnQueue struct { + // This is a queue, not a deque. + // It is split into two stages - head[headPos:] and tail. + // popFront is trivial (headPos++) on the first stage, and + // pushBack is trivial (append) on the second stage. + // If the first stage is empty, popFront can swap the + // first and second stages to remedy the situation. + // + // This two-stage split is analogous to the use of two lists + // in Okasaki's purely functional queue but without the + // overhead of reversing the list when swapping stages. + head []*wantConn + headPos int + tail []*wantConn +} + +// len returns the number of items in the queue. +func (q *wantConnQueue) len() int { + return len(q.head) - q.headPos + len(q.tail) +} + +// pushBack adds w to the back of the queue. +func (q *wantConnQueue) pushBack(w *wantConn) { + q.tail = append(q.tail, w) +} + +// popFront removes and returns the wantConn at the front of the queue. +func (q *wantConnQueue) popFront() *wantConn { + if q.headPos >= len(q.head) { + if len(q.tail) == 0 { + return nil + } + // Pick up tail as new head, clear tail. + q.head, q.headPos, q.tail = q.tail, 0, q.head[:0] + } + w := q.head[q.headPos] + q.head[q.headPos] = nil + q.headPos++ + return w +} + +// peekFront returns the wantConn at the front of the queue without removing it. 
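+//
+// The head/headPos/tail layout documented on wantConnQueue above gives amortized
+// O(1) pushBack and popFront. A standalone sketch of the same idea (illustrative
+// only, not code from this package):
+//
+//	type queue[T any] struct {
+//		head    []T
+//		headPos int
+//		tail    []T
+//	}
+//
+//	func (q *queue[T]) pushBack(v T) { q.tail = append(q.tail, v) }
+//
+//	func (q *queue[T]) popFront() (v T, ok bool) {
+//		if q.headPos >= len(q.head) {
+//			if len(q.tail) == 0 {
+//				return v, false
+//			}
+//			q.head, q.headPos, q.tail = q.tail, 0, q.head[:0] // swap stages, reusing capacity
+//		}
+//		v, ok = q.head[q.headPos], true
+//		q.headPos++
+//		return v, ok
+//	}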
+func (q *wantConnQueue) peekFront() *wantConn { + if q.headPos < len(q.head) { + return q.head[q.headPos] + } + if len(q.tail) > 0 { + return q.tail[0] + } + return nil +} + +// cleanFrontNotWaiting pops any wantConns that are no longer waiting from the head of the +// queue, reporting whether any were popped. +func (q *wantConnQueue) cleanFrontNotWaiting() (cleaned bool) { + for { + w := q.peekFront() + if w == nil || w.waiting() { + return cleaned + } + q.popFront() + cleaned = true + } +} + +// cleanFrontCanceled pops any wantConns with canceled dials from the head of the queue. +func (q *wantConnQueue) cleanFrontCanceled() { + for { + w := q.peekFront() + if w == nil || w.cancelCtx != nil { + return + } + q.popFront() + } +} + +// all iterates over all wantConns in the queue. +// The caller must not modify the queue while iterating. +func (q *wantConnQueue) all(f func(*wantConn)) { + for _, w := range q.head[q.headPos:] { + f(w) + } + for _, w := range q.tail { + f(w) + } +} + +func (t *Transport) customDialTLS(ctx context.Context, network, addr string) (conn net.Conn, err error) { + conn, err = t.DialTLSContext(ctx, network, addr) + + if conn == nil && err == nil { + err = errors.New("net/http: Transport.DialTLS or DialTLSContext returned (nil, nil)") + } + return +} + +// getConn dials and creates a new persistConn to the target as +// specified in the connectMethod. This includes doing a proxy CONNECT +// and/or setting up TLS. If this doesn't return an error, the persistConn +// is ready to write requests to. +func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) { + req := treq.Request + trace := treq.trace + ctx := req.Context() + if trace != nil && trace.GetConn != nil { + trace.GetConn(cm.addr()) + } + + // Detach from the request context's cancellation signal. + // The dial should proceed even if the request is canceled, + // because a future request may be able to make use of the connection. + // + // We retain the request context's values. + dialCtx, dialCancel := context.WithCancel(context.WithoutCancel(ctx)) + + w := &wantConn{ + cm: cm, + key: cm.key(), + ctx: dialCtx, + cancelCtx: dialCancel, + result: make(chan connOrError, 1), + beforeDial: testHookPrePendingDial, + afterDial: testHookPostPendingDial, + } + defer func() { + if err != nil { + w.cancel(t, err) + } + }() + + // Queue for idle connection. + if delivered := t.queueForIdleConn(w); !delivered { + t.queueForDial(w) + } + + // Wait for completion or cancellation. + select { + case r := <-w.result: + // Trace success but only for HTTP/1. + // HTTP/2 calls trace.GotConn itself. + if r.pc != nil && r.pc.alt == nil && trace != nil && trace.GotConn != nil { + info := httptrace.GotConnInfo{ + Conn: r.pc.conn, + Reused: r.pc.isReused(), + } + if !r.idleAt.IsZero() { + info.WasIdle = true + info.IdleTime = time.Since(r.idleAt) + } + trace.GotConn(info) + } + if r.err != nil { + // If the request has been canceled, that's probably + // what caused r.err; if so, prefer to return the + // cancellation error (see golang.org/issue/16049). + select { + case <-treq.ctx.Done(): + err := context.Cause(treq.ctx) + if err == common.ErrRequestCanceled { + err = errRequestCanceledConn + } + return nil, err + default: + // return below + } + } + return r.pc, r.err + case <-treq.ctx.Done(): + err := context.Cause(treq.ctx) + if err == common.ErrRequestCanceled { + err = errRequestCanceledConn + } + return nil, err + } +} + +// queueForDial queues w to wait for permission to begin dialing. 
+// Once w receives permission to dial, it will do so in a separate goroutine. +func (t *Transport) queueForDial(w *wantConn) { + w.beforeDial() + + t.connsPerHostMu.Lock() + defer t.connsPerHostMu.Unlock() + + if t.MaxConnsPerHost <= 0 { + t.startDialConnForLocked(w) + return + } + + if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost { + if t.connsPerHost == nil { + t.connsPerHost = make(map[connectMethodKey]int) + } + t.connsPerHost[w.key] = n + 1 + t.startDialConnForLocked(w) + return + } + + if t.connsPerHostWait == nil { + t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue) + } + q := t.connsPerHostWait[w.key] + q.cleanFrontNotWaiting() + q.pushBack(w) + t.connsPerHostWait[w.key] = q +} + +// startDialConnFor calls dialConn in a new goroutine. +// t.connsPerHostMu must be held. +func (t *Transport) startDialConnForLocked(w *wantConn) { + t.dialsInProgress.cleanFrontCanceled() + t.dialsInProgress.pushBack(w) + go func() { + t.dialConnFor(w) + t.connsPerHostMu.Lock() + defer t.connsPerHostMu.Unlock() + w.cancelCtx = nil + }() +} + +// dialConnFor dials on behalf of w and delivers the result to w. +// dialConnFor has received permission to dial w.cm and is counted in t.connCount[w.cm.key()]. +// If the dial is canceled or unsuccessful, dialConnFor decrements t.connCount[w.cm.key()]. +func (t *Transport) dialConnFor(w *wantConn) { + defer w.afterDial() + ctx := w.getCtxForDial() + if ctx == nil { + t.decConnsPerHost(w.key) + return + } + + pc, err := t.dialConn(ctx, w.cm) + delivered := w.tryDeliver(pc, err, time.Time{}) + if err == nil && (!delivered || pc.alt != nil) { + // pconn was not passed to w, + // or it is HTTP/2 and can be shared. + // Add to the idle connection pool. + t.putOrCloseIdleConn(pc) + } + if err != nil { + t.decConnsPerHost(w.key) + } +} + +// decConnsPerHost decrements the per-host connection count for key, +// which may in turn give a different waiting goroutine permission to dial. +func (t *Transport) decConnsPerHost(key connectMethodKey) { + if t.MaxConnsPerHost <= 0 { + return + } + + t.connsPerHostMu.Lock() + defer t.connsPerHostMu.Unlock() + n := t.connsPerHost[key] + if n == 0 { + // Shouldn't happen, but if it does, the counting is buggy and could + // easily lead to a silent deadlock, so report the problem loudly. + panic("net/http: internal error: connCount underflow") + } + + // Can we hand this count to a goroutine still waiting to dial? + // (Some goroutines on the wait list may have timed out or + // gotten a connection another way. If they're all gone, + // we don't want to kick off any spurious dial operations.) + if q := t.connsPerHostWait[key]; q.len() > 0 { + done := false + for q.len() > 0 { + w := q.popFront() + if w.waiting() { + t.startDialConnForLocked(w) + done = true + break + } + } + if q.len() == 0 { + delete(t.connsPerHostWait, key) + } else { + // q is a value (like a slice), so we have to store + // the updated q back into the map. + t.connsPerHostWait[key] = q + } + if done { + return + } + } + + // Otherwise, decrement the recorded count. + if n--; n == 0 { + delete(t.connsPerHost, key) + } else { + t.connsPerHost[key] = n + } +} + +// Add TLS to a persistent connection, i.e. negotiate a TLS session. If pconn is already a TLS +// tunnel, this function establishes a nested TLS session inside the encrypted channel. +// The remote endpoint's name may be overridden by TLSClientConfig.ServerName. 
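+//
+// Relating this to the exported setters earlier in this file (a hedged sketch;
+// t is assumed to be a *Transport value from this package):
+//
+//	t.SetTLSClientConfig(&tls.Config{ServerName: "internal.example.com"}). // name checked against the peer cert here
+//		SetTLSHandshakeTimeout(10 * time.Second) // bounds the handshake performed by addTLS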
+func (pc *persistConn) addTLS(ctx context.Context, name string, trace *httptrace.ClientTrace, forProxy bool) error { + // Initiate TLS and check remote host name against certificate. + cfg := cloneTLSConfig(pc.t.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = name + } + if pc.cacheKey.onlyH1 { + cfg.NextProtos = nil + } + plainConn := pc.conn + tlsConn := tls.Client(plainConn, cfg) + errc := make(chan error, 2) + var timer *time.Timer // for canceling TLS handshake + if d := pc.t.TLSHandshakeTimeout; d != 0 { + timer = time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + } + go func() { + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := tlsConn.HandshakeContext(ctx) + if timer != nil { + timer.Stop() + } + errc <- err + }() + if err := <-errc; err != nil { + plainConn.Close() + if err == (tlsHandshakeTimeoutError{}) { + // Now that we have closed the connection, + // wait for the call to HandshakeContext to return. + <-errc + } + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tls.ConnectionState{}, err) + } + return err + } + cs := tlsConn.ConnectionState() + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(cs, nil) + } + pc.tlsState = &cs + pc.conn = tlsConn + if !forProxy && pc.t.forceHttpVersion == h2 && cs.NegotiatedProtocol != h2internal.NextProtoTLS { + return newHttp2NotSupportedError(cs.NegotiatedProtocol) + } + return nil +} + +func newHttp2NotSupportedError(negotiatedProtocol string) error { + errMsg := "server does not support http2" + if negotiatedProtocol != "" { + errMsg += fmt.Sprintf(", you can use %s which is supported", negotiatedProtocol) + } + return errors.New(errMsg) +} + +func (t *Transport) customTlsHandshake(ctx context.Context, trace *httptrace.ClientTrace, addr string, pconn *persistConn) error { + errc := make(chan error, 2) + var timer *time.Timer // for canceling TLS handshake + if d := t.TLSHandshakeTimeout; d != 0 { + timer = time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + } + go func() { + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + conn, tlsState, err := t.TLSHandshakeContext(ctx, addr, pconn.conn) + if err != nil { + if timer != nil { + timer.Stop() + } + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tls.ConnectionState{}, err) + } + } else { + pconn.conn = conn + pconn.tlsState = tlsState + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(*tlsState, nil) + } + } + errc <- err + }() + if err := <-errc; err != nil { + pconn.conn.Close() + return err + } + return nil +} + +var testHookProxyConnectTimeout = context.WithTimeout + +func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) { + pconn = &persistConn{ + t: t, + cacheKey: cm.key(), + reqch: make(chan requestAndChan, 1), + writech: make(chan writeRequest, 1), + closech: make(chan struct{}), + writeErrCh: make(chan error, 1), + writeLoopDone: make(chan struct{}), + } + trace := httptrace.ContextClientTrace(ctx) + wrapErr := func(err error) error { + if cm.proxyURL != nil { + // Return a typed error, per Issue 16997 + return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err} + } + return err + } + if cm.scheme() == "https" && t.hasCustomTLSDialer() { + var err error + pconn.conn, err = t.customDialTLS(ctx, "tcp", cm.addr()) + if err != nil { + return nil, wrapErr(err) + } + if tc, ok := 
pconn.conn.(reqtls.Conn); ok { + // Handshake here, in case DialTLS didn't. TLSNextProto below + // depends on it for knowing the connection state. + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + if err := tc.HandshakeContext(ctx); err != nil { + go pconn.conn.Close() + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tls.ConnectionState{}, err) + } + return nil, err + } + cs := tc.ConnectionState() + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(cs, nil) + } + pconn.tlsState = &cs + if cm.proxyURL == nil && pconn.t.forceHttpVersion == h2 && cs.NegotiatedProtocol != h2internal.NextProtoTLS { + return nil, newHttp2NotSupportedError(cs.NegotiatedProtocol) + } + } + } else { + conn, err := t.dial(ctx, "tcp", cm.addr()) + if err != nil { + return nil, wrapErr(err) + } + pconn.conn = conn + if cm.scheme() == "https" { + var firstTLSHost string + if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil { + return nil, wrapErr(err) + } + if t.TLSHandshakeContext != nil && cm.proxyURL == nil { + err = t.customTlsHandshake(ctx, trace, firstTLSHost, pconn) + if err != nil { + return nil, err + } + } else { + if err = pconn.addTLS(ctx, firstTLSHost, trace, cm.proxyURL != nil); err != nil { + return nil, wrapErr(err) + } + } + } + } + + if t.Debugf != nil && cm.proxyURL != nil { + t.Debugf("connect %s via proxy %s", cm.targetAddr, cm.proxyURL.String()) + } + + // Proxy setup. + switch { + case cm.proxyURL == nil: + // Do nothing. Not using a proxy. + case cm.proxyURL.Scheme == "socks5" || cm.proxyURL.Scheme == "socks5h": + conn := pconn.conn + d := socks.NewDialer("tcp", conn.RemoteAddr().String()) + if u := cm.proxyURL.User; u != nil { + auth := &socks.UsernamePassword{ + Username: u.Username(), + } + auth.Password, _ = u.Password() + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = auth.Authenticate + } + if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil { + conn.Close() + return nil, err + } + case cm.targetScheme == "http": + pconn.isProxy = true + if pa := cm.proxyAuth(); pa != "" { + pconn.mutateHeaderFunc = func(h http.Header) { + h.Set("Proxy-Authorization", pa) + } + } + case cm.targetScheme == "https": + conn := pconn.conn + var hdr http.Header + if t.GetProxyConnectHeader != nil { + var err error + hdr, err = t.GetProxyConnectHeader(ctx, cm.proxyURL, cm.targetAddr) + if err != nil { + conn.Close() + return nil, err + } + } else { + hdr = t.ProxyConnectHeader + } + if hdr == nil { + hdr = make(http.Header) + } + if pa := cm.proxyAuth(); pa != "" { + hdr = hdr.Clone() + hdr.Set("Proxy-Authorization", pa) + } + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: cm.targetAddr}, + Host: cm.targetAddr, + Header: hdr, + } + + // Set a (long) timeout here to make sure we don't block forever + // and leak a goroutine if the connection stops replying after + // the TCP connect. + connectCtx, cancel := testHookProxyConnectTimeout(ctx, 1*time.Minute) + defer cancel() + + didReadResponse := make(chan struct{}) // closed after CONNECT write+read is done or fails + var ( + resp *http.Response + err error // write or read error + ) + // Write the CONNECT request & read the response. 
+ go func() { + defer close(didReadResponse) + err = connectReq.Write(conn) + if err != nil { + return + } + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. + br := bufio.NewReader(conn) + resp, err = http.ReadResponse(br, connectReq) + }() + select { + case <-connectCtx.Done(): + conn.Close() + <-didReadResponse + return nil, connectCtx.Err() + case <-didReadResponse: + // resp or err now set + } + if err != nil { + conn.Close() + return nil, err + } + + if t.OnProxyConnectResponse != nil { + err = t.OnProxyConnectResponse(ctx, cm.proxyURL, connectReq, resp) + if err != nil { + conn.Close() + return nil, err + } + } + + if resp.StatusCode != 200 { + _, text, ok := util.CutString(resp.Status, " ") + conn.Close() + if !ok { + return nil, errors.New("unknown status code") + } + return nil, errors.New(text) + } + } + + if cm.proxyURL != nil && cm.targetScheme == "https" { + if t.TLSHandshakeContext != nil { + err := t.customTlsHandshake(ctx, trace, cm.tlsHost(), pconn) + if err != nil { + return nil, err + } + } else { + if err := pconn.addTLS(ctx, cm.tlsHost(), trace, false); err != nil { + return nil, err + } + } + } + + if s := pconn.tlsState; t.forceHttpVersion != h1 && s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" { + if s.NegotiatedProtocol == h2internal.NextProtoTLS { + if used, err := t.t2.AddConn(pconn.conn, cm.targetAddr); err != nil { + go pconn.conn.Close() + return nil, err + } else if !used { + go pconn.conn.Close() + } + return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: t.t2}, nil + } + } + + pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize()) + pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize()) + + go pconn.readLoop() + go pconn.writeLoop() + return pconn, nil +} + +// persistConnWriter is the io.Writer written to by pc.bw. +// It accumulates the number of bytes written to the underlying conn, +// so the retry logic can determine whether any bytes made it across +// the wire. +// This is exactly 1 pointer field wide so it can go into an interface +// without allocation. +type persistConnWriter struct { + pc *persistConn +} + +func (w persistConnWriter) Write(p []byte) (n int, err error) { + n, err = w.pc.conn.Write(p) + w.pc.nwrite += int64(n) + return +} + +// ReadFrom exposes persistConnWriter's underlying Conn to io.Copy and if +// the Conn implements io.ReaderFrom, it can take advantage of optimizations +// such as sendfile. +func (w persistConnWriter) ReadFrom(r io.Reader) (n int64, err error) { + n, err = io.Copy(w.pc.conn, r) + w.pc.nwrite += n + return +} + +var _ io.ReaderFrom = (*persistConnWriter)(nil) + +// connectMethod is the map key (in its String form) for keeping persistent +// TCP connections alive for subsequent HTTP requests. 
+// +// A connect method may be of the following types: +// +// connectMethod.key().String() Description +// ------------------------------ ------------------------- +// |http|foo.com http directly to server, no proxy +// |https|foo.com https directly to server, no proxy +// |https,h1|foo.com https directly to server w/o HTTP/2, no proxy +// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com +// http://proxy.com|http http to proxy, http to anywhere after that +// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com +// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com +// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com +// https://proxy.com|http https to proxy, http to anywhere after that +type connectMethod struct { + _ incomparable + proxyURL *url.URL // nil for no proxy, else full proxy URL + targetScheme string // "http" or "https" + // If proxyURL specifies an http or https proxy, and targetScheme is http (not https), + // then targetAddr is not included in the connect method key, because the socket can + // be reused for different targetAddr values. + targetAddr string + onlyH1 bool // whether to disable HTTP/2 and force HTTP/1 +} + +func (cm *connectMethod) key() connectMethodKey { + proxyStr := "" + targetAddr := cm.targetAddr + if cm.proxyURL != nil { + proxyStr = cm.proxyURL.String() + if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" { + targetAddr = "" + } + } + return connectMethodKey{ + proxy: proxyStr, + scheme: cm.targetScheme, + addr: targetAddr, + onlyH1: cm.onlyH1, + } +} + +// scheme returns the first hop scheme: http, https, or socks5 +func (cm *connectMethod) scheme() string { + if cm.proxyURL != nil { + return cm.proxyURL.Scheme + } + return cm.targetScheme +} + +// addr returns the first hop "host:port" to which we need to TCP connect. +func (cm *connectMethod) addr() string { + if cm.proxyURL != nil { + return canonicalAddr(cm.proxyURL) + } + return cm.targetAddr +} + +// tlsHost returns the host name to match against the peer's +// TLS certificate. +func (cm *connectMethod) tlsHost() string { + h := cm.targetAddr + if hasPort(h) { + h = h[:strings.LastIndex(h, ":")] + } + return h +} + +// connectMethodKey is the map key version of connectMethod, with a +// stringified proxy URL (or the empty string) instead of a pointer to +// a URL. +type connectMethodKey struct { + proxy, scheme, addr string + onlyH1 bool +} + +func (k connectMethodKey) String() string { + // Only used by tests. + var h1 string + if k.onlyH1 { + h1 = ",h1" + } + return fmt.Sprintf("%s|%s%s|%s", k.proxy, k.scheme, h1, k.addr) +} + +// persistConn wraps a connection, usually a persistent one +// (but may be used for non-keep-alive requests as well) +type persistConn struct { + // alt optionally specifies the TLS NextProto http.RoundTripper. + // This is used for HTTP/2 today and future protocols later. + // If it's non-nil, the rest of the fields are unused. 
+ alt http.RoundTripper + + t *Transport + cacheKey connectMethodKey + conn net.Conn + tlsState *tls.ConnectionState + br *bufio.Reader // from conn + bw *bufio.Writer // to conn + nwrite int64 // bytes written + reqch chan requestAndChan // written by roundTrip; read by readLoop + writech chan writeRequest // written by roundTrip; read by writeLoop + closech chan struct{} // closed when conn closed + isProxy bool + sawEOF bool // whether we've seen EOF from conn; owned by readLoop + readLimit int64 // bytes allowed to be read; owned by readLoop + // writeErrCh passes the request write error (usually nil) + // from the writeLoop goroutine to the readLoop which passes + // it off to the res.Body reader, which then uses it to decide + // whether or not a connection can be reused. Issue 7569. + writeErrCh chan error + + writeLoopDone chan struct{} // closed when write loop ends + + // Both guarded by Transport.idleMu: + idleAt time.Time // time it last become idle + idleTimer *time.Timer // holding an AfterFunc to close it + + mu sync.Mutex // guards following fields + numExpectedResponses int + closed error // set non-nil when conn is closed, before closech is closed + canceledErr error // set non-nil if conn is canceled + broken bool // an error has happened on this connection; marked broken so it's not reused. + reused bool // whether conn has had successful request/response and is being reused. + // mutateHeaderFunc is an optional func to modify extra + // headers on each outbound request before it's written. (the + // original Request given to RoundTrip is not modified) + mutateHeaderFunc func(http.Header) +} + +// RFC 7234, section 5.4: Should treat +// +// Pragma: no-cache +// +// like +// +// Cache-Control: no-cache +func fixPragmaCacheControl(header http.Header) { + if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { + if _, presentcc := header["Cache-Control"]; !presentcc { + header["Cache-Control"] = []string{"no-cache"} + } + } +} + +// readResponse reads an HTTP response (or two, in the case of "Expect: +// 100-continue") from the server. It returns the final non-100 one. +// trace is optional. +func (pc *persistConn) _readResponse(req *http.Request) (*http.Response, error) { + ds := dump.GetResponseHeaderDumpers(req.Context(), pc.t.Dump) + tp := newTextprotoReader(pc.br, ds) + resp := &http.Response{ + Request: req, + } + + // Parse the first line of the response. + line, err := tp.ReadLine() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + proto, status, ok := util.CutString(line, " ") + if !ok { + return nil, badStringError("malformed HTTP response", line) + } + resp.Proto = proto + resp.Status = strings.TrimLeft(status, " ") + + statusCode, _, _ := util.CutString(resp.Status, " ") + if len(statusCode) != 3 { + return nil, badStringError("malformed HTTP status code", statusCode) + } + resp.StatusCode, err = strconv.Atoi(statusCode) + if err != nil || resp.StatusCode < 0 { + return nil, badStringError("malformed HTTP status code", statusCode) + } + if resp.ProtoMajor, resp.ProtoMinor, ok = http.ParseHTTPVersion(resp.Proto); !ok { + return nil, badStringError("malformed HTTP version", resp.Proto) + } + + // Parse the response headers. 
+ mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + resp.Header = http.Header(mimeHeader) + + fixPragmaCacheControl(resp.Header) + + err = readTransfer(resp, pc.br) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (pc *persistConn) maxHeaderResponseSize() int64 { + if v := pc.t.MaxResponseHeaderBytes; v != 0 { + return v + } + return 10 << 20 // conservative default; same as http2 +} + +func (pc *persistConn) Read(p []byte) (n int, err error) { + if pc.readLimit <= 0 { + return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize()) + } + if int64(len(p)) > pc.readLimit { + p = p[:pc.readLimit] + } + n, err = pc.conn.Read(p) + if err == io.EOF { + pc.sawEOF = true + } + pc.readLimit -= int64(n) + return +} + +// isBroken reports whether this connection is in a known broken state. +func (pc *persistConn) isBroken() bool { + pc.mu.Lock() + b := pc.closed != nil + pc.mu.Unlock() + return b +} + +// canceled returns non-nil if the connection was closed due to +// CancelRequest or due to context cancellation. +func (pc *persistConn) canceled() error { + pc.mu.Lock() + defer pc.mu.Unlock() + return pc.canceledErr +} + +// isReused reports whether this connection has been used before. +func (pc *persistConn) isReused() bool { + pc.mu.Lock() + r := pc.reused + pc.mu.Unlock() + return r +} + +func (pc *persistConn) cancelRequest(err error) { + pc.mu.Lock() + defer pc.mu.Unlock() + pc.canceledErr = err + pc.closeLocked(common.ErrRequestCanceled) +} + +// closeConnIfStillIdle closes the connection if it's still sitting idle. +// This is what's called by the persistConn's idleTimer, and is run in its +// own goroutine. +func (pc *persistConn) closeConnIfStillIdle() { + t := pc.t + t.idleMu.Lock() + defer t.idleMu.Unlock() + if _, ok := t.idleLRU.m[pc]; !ok { + // Not idle. + return + } + t.removeIdleConnLocked(pc) + pc.close(errIdleConnTimeout) +} + +// mapRoundTripError returns the appropriate error value for +// persistConn.roundTrip. +// +// The provided err is the first error that (*persistConn).roundTrip +// happened to receive from its select statement. +// +// The startBytesWritten value should be the value of pc.nwrite before the roundTrip +// started writing the request. +func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error { + if err == nil { + return nil + } + + // Wait for the writeLoop goroutine to terminate to avoid data + // races on callers who mutate the request on failure. + // + // When resc in pc.roundTrip and hence rc.ch receives a responseAndError + // with a non-nil error it implies that the persistConn is either closed + // or closing. Waiting on pc.writeLoopDone is hence safe as all callers + // close closech which in turn ensures writeLoop returns. + <-pc.writeLoopDone + + // If the request was canceled, that's better than network + // failures that were likely the result of tearing down the + // connection. + if cerr := pc.canceled(); cerr != nil { + return cerr + } + + // See if an error was set explicitly. 
+ req.mu.Lock() + reqErr := req.err + req.mu.Unlock() + if reqErr != nil { + return reqErr + } + + if err == errServerClosedIdle { + // Don't decorate + return err + } + + if _, ok := err.(transportReadFromServerError); ok { + if pc.nwrite == startBytesWritten { + return nothingWrittenError{err} + } + // Don't decorate + return err + } + if pc.isBroken() { + if pc.nwrite == startBytesWritten { + return nothingWrittenError{err} + } + return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %w", err) + } + return err +} + +// errCallerOwnsConn is an internal sentinel error used when we hand +// off a writable response.Body to the caller. We use this to prevent +// closing a net.Conn that is now owned by the caller. +var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn") + +func (pc *persistConn) readLoop() { + closeErr := errReadLoopExiting // default value, if not changed below + defer func() { + pc.close(closeErr) + pc.t.removeIdleConn(pc) + }() + + tryPutIdleConn := func(treq *transportRequest) bool { + trace := treq.trace + if err := pc.t.tryPutIdleConn(pc); err != nil { + closeErr = err + if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled { + trace.PutIdleConn(err) + } + return false + } + if trace != nil && trace.PutIdleConn != nil { + trace.PutIdleConn(nil) + } + return true + } + + // eofc is used to block caller goroutines reading from Response.Body + // at EOF until this goroutines has (potentially) added the connection + // back to the idle pool. + eofc := make(chan struct{}) + defer close(eofc) // unblock reader on errors + + // Read this once, before loop starts. (to avoid races in tests) + testHookMu.Lock() + testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead + testHookMu.Unlock() + + alive := true + for alive { + pc.readLimit = pc.maxHeaderResponseSize() + _, err := pc.br.Peek(1) + + pc.mu.Lock() + if pc.numExpectedResponses == 0 { + pc.readLoopPeekFailLocked(err) + pc.mu.Unlock() + return + } + pc.mu.Unlock() + + rc := <-pc.reqch + trace := rc.treq.trace + + var resp *http.Response + if err == nil { + resp, err = pc.readResponse(rc, trace) + } else { + err = transportReadFromServerError{err} + closeErr = err + } + + if err != nil { + if pc.readLimit <= 0 { + err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize()) + } + + select { + case rc.ch <- responseAndError{err: err}: + case <-rc.callerGone: + return + } + return + } + pc.readLimit = maxInt64 // effectively no limit for response bodies + + pc.mu.Lock() + pc.numExpectedResponses-- + pc.mu.Unlock() + + bodyWritable := bodyIsWritable(resp) + hasBody := rc.treq.Request.Method != "HEAD" && resp.ContentLength != 0 + + if resp.Close || rc.treq.Request.Close || resp.StatusCode <= 199 || bodyWritable { + // Don't do keep-alive on error if either party requested a close + // or we get an unexpected informational (1xx) response. + // StatusCode 100 is already handled above. + alive = false + } + + if !hasBody || bodyWritable { + // Put the idle conn back into the pool before we send the response + // so if they process it quickly and make another request, they'll + // get this same conn. But we use the unbuffered channel 'rc' + // to guarantee that persistConn.roundTrip got out of its select + // potentially waiting for this persistConn to close. 
+ alive = alive && + !pc.sawEOF && + pc.wroteRequest() && + tryPutIdleConn(rc.treq) + + if bodyWritable { + closeErr = errCallerOwnsConn + } + + select { + case rc.ch <- responseAndError{res: resp}: + case <-rc.callerGone: + return + } + + rc.treq.cancel(errRequestDone) + + // Now that they've read from the unbuffered channel, they're safely + // out of the select that also waits on this goroutine to die, so + // we're allowed to exit now if needed (if alive is false) + testHookReadLoopBeforeNextRead() + continue + } + + waitForBodyRead := make(chan bool, 2) + body := &bodyEOFSignal{ + body: resp.Body, + earlyCloseFn: func() error { + waitForBodyRead <- false + <-eofc // will be closed by deferred call at the end of the function + return nil + }, + fn: func(err error) error { + isEOF := err == io.EOF + waitForBodyRead <- isEOF + if isEOF { + <-eofc // see comment above eofc declaration + } else if err != nil { + if cerr := pc.canceled(); cerr != nil { + return cerr + } + } + return err + }, + } + + resp.Body = body + if rc.addedGzip && ascii.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") { + resp.Body = &gzipReader{body: body} + resp.Header.Del("Content-Encoding") + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + resp.Uncompressed = true + } else if pc.t.AutoDecompression { + contentEncoding := resp.Header.Get("Content-Encoding") + if contentEncoding != "" { + resp.Header.Del("Content-Encoding") + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + resp.Uncompressed = true + resp.Body = compress.NewCompressReader(resp.Body, contentEncoding) + } + } + + select { + case rc.ch <- responseAndError{res: resp}: + case <-rc.callerGone: + return + } + + // Before looping back to the top of this function and peeking on + // the bufio.Reader, wait for the caller goroutine to finish + // reading the response body. (or for cancellation or death) + select { + case bodyEOF := <-waitForBodyRead: + alive = alive && + bodyEOF && + !pc.sawEOF && + pc.wroteRequest() && + tryPutIdleConn(rc.treq) + if bodyEOF { + eofc <- struct{}{} + } + case <-rc.treq.ctx.Done(): + alive = false + pc.cancelRequest(context.Cause(rc.treq.ctx)) + case <-pc.closech: + alive = false + } + + rc.treq.cancel(errRequestDone) + testHookReadLoopBeforeNextRead() + } +} + +func (pc *persistConn) readLoopPeekFailLocked(peekErr error) { + if pc.closed != nil { + return + } + if n := pc.br.Buffered(); n > 0 { + buf, _ := pc.br.Peek(n) + if is408Message(buf) { + pc.closeLocked(errServerClosedIdle) + return + } + log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr) + } + if peekErr == io.EOF { + // common case. + pc.closeLocked(errServerClosedIdle) + } else { + pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %w", peekErr)) + } +} + +// is408Message reports whether buf has the prefix of an +// HTTP 408 Request Timeout response. +// See golang.org/issue/32310. +func is408Message(buf []byte) bool { + if len(buf) < len("HTTP/1.x 408") { + return false + } + if string(buf[:7]) != "HTTP/1." { + return false + } + return string(buf[8:12]) == " 408" +} + +// readResponse reads an HTTP response (or two, in the case of "Expect: +// 100-continue") from the server. It returns the final non-100 one. +// trace is optional. 
+func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *http.Response, err error) { + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + num1xx := 0 // number of informational 1xx headers received + const max1xxResponses = 5 // arbitrary bound on number of informational responses + + continueCh := rc.continueCh + for { + resp, err = pc._readResponse(rc.treq.Request) + if err != nil { + return + } + resCode := resp.StatusCode + if continueCh != nil && resCode == http.StatusContinue { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } + continueCh <- struct{}{} + continueCh = nil + } + is1xx := 100 <= resCode && resCode <= 199 + // treat 101 as a terminal status, see issue 26161 + is1xxNonTerminal := is1xx && resCode != http.StatusSwitchingProtocols + + if is1xxNonTerminal { + num1xx++ + if num1xx > max1xxResponses { + return nil, errors.New("net/http: too many 1xx informational responses") + } + pc.readLimit = pc.maxHeaderResponseSize() // reset the limit + if trace != nil && trace.Got1xxResponse != nil { + if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(resp.Header)); err != nil { + return nil, err + } + } + continue + } + break + } + if isProtocolSwitch(resp) { + resp.Body = newReadWriteCloserBody(pc.br, pc.conn) + } + if continueCh != nil { + // We send an "Expect: 100-continue" header, but the server + // responded with a terminal status and no 100 Continue. + // + // If we're going to keep using the connection, we need to send the request body. + // Tell writeLoop to skip sending the body if we're going to close the connection, + // or to send it otherwise. + // + // The case where we receive a 101 Switching Protocols response is a bit + // ambiguous, since we don't know what protocol we're switching to. + // Conceivably, it's one that doesn't need us to send the body. + // Given that we'll send the body if ExpectContinueTimeout expires, + // be consistent and always send it if we aren't closing the connection. + if resp.Close || rc.treq.Request.Close { + close(continueCh) // don't send the body; the connection will close + } else { + continueCh <- struct{}{} // send the body + } + } + + resp.TLS = pc.tlsState + return +} + +// waitForContinue returns the function to block until +// any response, timeout or connection close. After any of them, +// the function returns a bool which indicates if the body should be sent. +func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool { + if continueCh == nil { + return nil + } + return func() bool { + timer := time.NewTimer(pc.t.ExpectContinueTimeout) + defer timer.Stop() + + select { + case _, ok := <-continueCh: + return ok + case <-timer.C: + return true + case <-pc.closech: + return false + } + } +} + +func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser { + body := &readWriteCloserBody{ReadWriteCloser: rwc} + if br.Buffered() != 0 { + body.br = br + } + return body +} + +// readWriteCloserBody is the Response.Body type used when we want to +// give users write access to the Body through the underlying +// connection (TCP, unless using custom dialers). This is then +// the concrete type for a Response.Body on the 101 Switching +// Protocols response, as used by WebSockets, h2c, etc. 
+type readWriteCloserBody struct { + _ incomparable + br *bufio.Reader // used until empty + io.ReadWriteCloser +} + +func (b *readWriteCloserBody) Read(p []byte) (n int, err error) { + if b.br != nil { + if n := b.br.Buffered(); len(p) > n { + p = p[:n] + } + n, err = b.br.Read(p) + if b.br.Buffered() == 0 { + b.br = nil + } + return n, err + } + return b.ReadWriteCloser.Read(p) +} + +// nothingWrittenError wraps a write errors which ended up writing zero bytes. +type nothingWrittenError struct { + error +} + +func (nwe nothingWrittenError) Unwrap() error { + return nwe.error +} + +func (pc *persistConn) writeLoop() { + defer close(pc.writeLoopDone) + for { + select { + case wr := <-pc.writech: + startBytesWritten := pc.nwrite + err := pc.writeRequest(wr.req.Request, pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh)) + if bre, ok := err.(requestBodyReadError); ok { + err = bre.error + // Errors reading from the user's + // Request.Body are high priority. + // Set it here before sending on the + // channels below or calling + // pc.close() which tears down + // connections and causes other + // errors. + wr.req.setError(err) + } + if err == nil { + err = pc.bw.Flush() + } + if err != nil { + if pc.nwrite == startBytesWritten { + err = nothingWrittenError{err} + } + } + pc.writeErrCh <- err // to the body reader, which might recycle us + wr.ch <- err // to the roundTrip function + if err != nil { + pc.close(err) + return + } + case <-pc.closech: + return + } + } +} + +// extraHeaders may be nil +// waitForContinue may be nil +// always closes body +func (pc *persistConn) writeRequest(r *http.Request, w io.Writer, usingProxy bool, extraHeaders http.Header, waitForContinue func() bool) (err error) { + trace := httptrace.ContextClientTrace(r.Context()) + if trace != nil && trace.WroteRequest != nil { + defer func() { + trace.WroteRequest(httptrace.WroteRequestInfo{ + Err: err, + }) + }() + } + closed := false + defer func() { + if closed { + return + } + if closeErr := closeRequestBody(r); closeErr != nil && err == nil { + err = closeErr + } + }() + + // Find the target host. Prefer the Host: header, but if that + // is not given, use the host from the request URL. + // + // Clean the host, in case it arrives with unexpected stuff in it. + host := r.Host + if host == "" { + if r.URL == nil { + return errMissingHost + } + host = r.URL.Host + } + host, err = httpguts.PunycodeHostPort(host) + if err != nil { + return err + } + + // Validate that the Host header is a valid header in general, + // but don't validate the host itself. This is sufficient to avoid + // header or request smuggling via the Host field. + // The server can (and will, if it's a net/http server) reject + // the request if it doesn't consider the host valid. + if !httpguts.ValidHostHeader(host) { + // Historically, we would truncate the Host header after '/' or ' '. + // Some users have relied on this truncation to convert a network + // address such as Unix domain socket path into a valid, ignored + // Host header (see https://go.dev/issue/61431). + // + // We don't preserve the truncation, because sending an altered + // header field opens a smuggling vector. Instead, zero out the + // Host header entirely if it isn't valid. (An empty Host is valid; + // see RFC 9112 Section 3.2.) + // + // Return an error if we're sending to a proxy, since the proxy + // probably can't do anything useful with an empty Host header. 
+ if !usingProxy { + host = "" + } else { + return errors.New("http: invalid Host header") + } + } + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + host = removeZone(host) + + ruri := r.URL.RequestURI() + if usingProxy && r.URL.Scheme != "" && r.URL.Opaque == "" { + ruri = r.URL.Scheme + "://" + host + ruri + } else if r.Method == "CONNECT" && r.URL.Path == "" { + // CONNECT requests normally give just the host and port, not a full URL. + ruri = host + if r.URL.Opaque != "" { + ruri = r.URL.Opaque + } + } + if stringContainsCTLByte(ruri) { + return errors.New("net/http: can't write control character in Request.URL") + } + // TODO: validate r.Method too? At least it's less likely to + // come from an attacker (more likely to be a constant in + // code). + + // Wrap the writer in a bufio Writer if it's not already buffered. + // Don't always call NewWriter, as that forces a bytes.Buffer + // and other small bufio Writers to have a minimum 4k buffer + // size. + var bw *bufio.Writer + if _, ok := w.(io.ByteWriter); !ok { + bw = bufio.NewWriter(w) + w = bw + } + + rw := w // raw writer + dumps := dump.GetDumpers(r.Context(), pc.t.Dump) + for _, dump := range dumps { + if dump.RequestHeader() { + w = dump.WrapRequestHeaderWriter(w) + } + } + + _, err = fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(r.Method, "GET"), ruri) + if err != nil { + return err + } + + _writeHeader := func(key string, values ...string) error { + for _, value := range values { + _, err := fmt.Fprintf(w, "%s: %s\r\n", key, value) + if err != nil { + return err + } + } + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(key, values) + } + return nil + } + + var writeHeader func(key string, values ...string) error + var kvs []header.KeyValues + sort := false + + if r.Header != nil && len(r.Header[header.HeaderOderKey]) > 0 { + writeHeader = func(key string, values ...string) error { + kvs = append(kvs, header.KeyValues{ + Key: key, + Values: values, + }) + return nil + } + sort = true + } else { + writeHeader = _writeHeader + } + // Header lines + err = writeHeader("Host", host) + if err != nil { + return err + } + + // Use the defaultUserAgent unless the Header contains one, which + // may be blank to not send the header. + userAgent := header.DefaultUserAgent + if headerHas(r.Header, "User-Agent") { + userAgent = r.Header.Get("User-Agent") + } + if userAgent != "" { + err = writeHeader("User-Agent", userAgent) + if err != nil { + return err + } + } + + // Process Body,ContentLength,Close,Trailer + tw, err := newTransferWriter(r) + if err != nil { + return err + } + err = tw.writeHeader(writeHeader) + if err != nil { + return err + } + + err = headerWriteSubset(r.Header, reqWriteExcludeHeader, writeHeader, sort) + if err != nil { + return err + } + + if extraHeaders != nil { + err = headerWrite(extraHeaders, writeHeader, sort) + if err != nil { + return err + } + } + + if sort { // sort and write headers + header.SortKeyValues(kvs, r.Header[header.HeaderOderKey]) + for _, kv := range kvs { + _writeHeader(kv.Key, kv.Values...) + } + } + + _, err = io.WriteString(w, "\r\n") + if err != nil { + return err + } + + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } + + // Flush and wait for 100-continue if expected. 
+ if waitForContinue != nil { + if bw, ok := w.(*bufio.Writer); ok { + err = bw.Flush() + if err != nil { + return err + } + } + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } + if !waitForContinue() { + closed = true + closeRequestBody(r) + return nil + } + } + + if bw, ok := w.(*bufio.Writer); ok && tw.FlushHeaders { + if err := bw.Flush(); err != nil { + return err + } + } + + // Write body and trailer + closed = true + err = tw.writeBody(rw, dumps) + if err != nil { + if tw.bodyReadError == err { + err = requestBodyReadError{err} + } + return err + } + + if bw != nil { + return bw.Flush() + } + return nil +} + +// maxWriteWaitBeforeConnReuse is how long the a Transport RoundTrip +// will wait to see the Request's Body.Write result after getting a +// response from the server. See comments in (*persistConn).wroteRequest. +// +// In tests, we set this to a large value to avoid flakiness from inconsistent +// recycling of connections. +var maxWriteWaitBeforeConnReuse = 50 * time.Millisecond + +// wroteRequest is a check before recycling a connection that the previous write +// (from writeLoop above) happened and was successful. +func (pc *persistConn) wroteRequest() bool { + select { + case err := <-pc.writeErrCh: + // Common case: the write happened well before the response, so + // avoid creating a timer. + return err == nil + default: + // Rare case: the request was written in writeLoop above but + // before it could send to pc.writeErrCh, the reader read it + // all, processed it, and called us here. In this case, give the + // write goroutine a bit of time to finish its send. + // + // Less rare case: We also get here in the legitimate case of + // Issue 7569, where the writer is still writing (or stalled), + // but the server has already replied. In this case, we don't + // want to wait too long, and we want to return false so this + // connection isn't re-used. + t := time.NewTimer(maxWriteWaitBeforeConnReuse) + defer t.Stop() + select { + case err := <-pc.writeErrCh: + return err == nil + case <-t.C: + return false + } + } +} + +// responseAndError is how the goroutine reading from an HTTP/1 server +// communicates with the goroutine doing the RoundTrip. +type responseAndError struct { + _ incomparable + res *http.Response // else use this response (see res method) + err error +} + +type requestAndChan struct { + _ incomparable + treq *transportRequest + ch chan responseAndError // unbuffered; always send in select on callerGone + + // whether the Transport (as opposed to the user client code) + // added the Accept-Encoding gzip header. If the Transport + // set it, only then do we transparently decode the gzip. + addedGzip bool + + // Optional blocking chan for Expect: 100-continue (for send). + // If the request has an "Expect: 100-continue" header and + // the server responds 100 Continue, readLoop send a value + // to writeLoop via this chan. + continueCh chan<- struct{} + + callerGone <-chan struct{} // closed when roundTrip caller has returned +} + +// A writeRequest is sent by the caller's goroutine to the +// writeLoop's goroutine to write a request while the read loop +// concurrently waits on both the write response and the server's +// reply. +type writeRequest struct { + req *transportRequest + ch chan<- error + + // Optional blocking chan for Expect: 100-continue (for receive). + // If not nil, writeLoop blocks sending request body until + // it receives from this chan. 
+ continueCh <-chan struct{} +} + +// httpTimeoutError represents a timeout. +// It implements net.Error and wraps context.DeadlineExceeded. +type timeoutError struct { + err string +} + +func (e *timeoutError) Error() string { return e.err } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } +func (e *timeoutError) Is(err error) bool { return err == context.DeadlineExceeded } + +var errTimeout error = &timeoutError{"net/http: timeout awaiting response headers"} + +var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify? + +// errRequestDone is used to cancel the round trip Context after a request is successfully done. +// It should not be seen by the user. +var errRequestDone = errors.New("net/http: request completed") + +func nop() {} + +// testHooks. Always non-nil. +var ( + testHookEnterRoundTrip = nop + testHookWaitResLoop = nop + testHookRoundTripRetried = nop + testHookPrePendingDial = nop + testHookPostPendingDial = nop + + testHookMu sync.Locker = fakeLocker{} // guards following + testHookReadLoopBeforeNextRead = nop +) + +func (pc *persistConn) roundTrip(req *transportRequest) (resp *http.Response, err error) { + if pc.t.Debugf != nil { + pc.t.Debugf("HTTP/1.1 %s %s", req.Method, req.URL.String()) + } + testHookEnterRoundTrip() + pc.mu.Lock() + pc.numExpectedResponses++ + headerFn := pc.mutateHeaderFunc + pc.mu.Unlock() + + if headerFn != nil { + headerFn(req.extraHeaders()) + } + + // Ask for a compressed version if the caller didn't set their + // own value for Accept-Encoding. We only attempt to + // uncompress the gzip stream if we were the layer that + // requested it. + requestedGzip := false + if !pc.t.DisableCompression && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // https://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + req.extraHeaders().Set("Accept-Encoding", "gzip") + } + + var continueCh chan struct{} + if req.ProtoAtLeast(1, 1) && req.Body != nil && reqExpectsContinue(req.Request) { + continueCh = make(chan struct{}, 1) + } + + if pc.t.DisableKeepAlives && + !reqWantsClose(req.Request) && + !isProtocolSwitchHeader(req.Header) { + req.extraHeaders().Set("Connection", "close") + } + + gone := make(chan struct{}) + defer close(gone) + + const debugRoundTrip = false + + // Write the request concurrently with waiting for a response, + // in case the server decides to reply before reading our full + // request body. 
+ startBytesWritten := pc.nwrite + writeErrCh := make(chan error, 1) + pc.writech <- writeRequest{req, writeErrCh, continueCh} + + resc := make(chan responseAndError) + pc.reqch <- requestAndChan{ + treq: req, + ch: resc, + addedGzip: requestedGzip, + continueCh: continueCh, + callerGone: gone, + } + handleResponse := func(re responseAndError) (*http.Response, error) { + if (re.res == nil) == (re.err == nil) { + panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil)) + } + if debugRoundTrip { + req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err) + } + if re.err != nil { + return nil, pc.mapRoundTripError(req, startBytesWritten, re.err) + } + return re.res, nil + } + + var respHeaderTimer <-chan time.Time + ctxDoneChan := req.ctx.Done() + pcClosed := pc.closech + for { + testHookWaitResLoop() + select { + case err := <-writeErrCh: + if debugRoundTrip { + req.logf("writeErrCh recv: %T/%#v", err, err) + } + if err != nil { + pc.close(fmt.Errorf("write error: %w", err)) + return nil, pc.mapRoundTripError(req, startBytesWritten, err) + } + if d := pc.t.ResponseHeaderTimeout; d > 0 { + if debugRoundTrip { + req.logf("starting timer for %v", d) + } + timer := time.NewTimer(d) + defer timer.Stop() // prevent leaks + respHeaderTimer = timer.C + } + case <-pcClosed: + select { + case re := <-resc: + // The pconn closing raced with the response to the request, + // probably after the server wrote a response and immediately + // closed the connection. Use the response. + return handleResponse(re) + default: + } + if debugRoundTrip { + req.logf("closech recv: %T %#v", pc.closed, pc.closed) + } + return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed) + case <-respHeaderTimer: + if debugRoundTrip { + req.logf("timeout waiting for response headers.") + } + pc.close(errTimeout) + return nil, errTimeout + case re := <-resc: + return handleResponse(re) + case <-ctxDoneChan: + select { + case re := <-resc: + // readLoop is responsible for canceling req.ctx after + // it reads the response body. Check for a response racing + // the context close, and use the response if available. + return handleResponse(re) + default: + } + pc.cancelRequest(context.Cause(req.ctx)) + } + } +} + +// tLogKey is a context WithValue key for test debugging contexts containing +// a t.Logf func. See export_test.go's Request.WithT method. +type tLogKey struct{} + +func (tr *transportRequest) logf(format string, args ...interface{}) { + if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...interface{})); ok { + logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...) + } +} + +// markReused marks this connection as having been successfully used for a +// request and response. +func (pc *persistConn) markReused() { + pc.mu.Lock() + pc.reused = true + pc.mu.Unlock() +} + +// close closes the underlying TCP connection and closes +// the pc.closech channel. +// +// The provided err is only for testing and debugging; in normal +// circumstances it should never be seen by users. +func (pc *persistConn) close(err error) { + pc.mu.Lock() + defer pc.mu.Unlock() + pc.closeLocked(err) +} + +func (pc *persistConn) closeLocked(err error) { + if err == nil { + panic("nil error") + } + pc.broken = true + if pc.closed == nil { + pc.closed = err + pc.t.decConnsPerHost(pc.cacheKey) + // Close HTTP/1 (pc.alt == nil) connection. + // HTTP/2 closes its connection itself. 
+ if pc.alt == nil { + if err != errCallerOwnsConn { + pc.conn.Close() + } + close(pc.closech) + } + } + pc.mutateHeaderFunc = nil +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", + "socks5h": "1080", +} + +func idnaASCIIFromURL(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + return addr +} + +// canonicalAddr returns url.Host but always with a ":port" suffix. +func canonicalAddr(url *url.URL) string { + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(idnaASCIIFromURL(url), port) +} + +// bodyEOFSignal is used by the HTTP/1 transport when reading response +// bodies to make sure we see the end of a response body before +// proceeding and reading on the connection again. +// +// It wraps a ReadCloser but runs fn (if non-nil) at most +// once, right before its final (error-producing) Read or Close call +// returns. fn should return the new error to return from Read or Close. +// +// If earlyCloseFn is non-nil and Close is called before io.EOF is +// seen, earlyCloseFn is called instead of fn, and its return value is +// the return value from Close. +type bodyEOFSignal struct { + body io.ReadCloser + mu sync.Mutex // guards following 4 fields + closed bool // whether Close has been called + rerr error // sticky Read error + fn func(error) error // err will be nil on Read io.EOF + earlyCloseFn func() error // optional alt Close func used if io.EOF not seen +} + +var errReadOnClosedResBody = errors.New("http: read on closed response body") + +func (es *bodyEOFSignal) Read(p []byte) (n int, err error) { + es.mu.Lock() + closed, rerr := es.closed, es.rerr + es.mu.Unlock() + if closed { + return 0, errReadOnClosedResBody + } + if rerr != nil { + return 0, rerr + } + + n, err = es.body.Read(p) + if err != nil { + es.mu.Lock() + defer es.mu.Unlock() + if es.rerr == nil { + es.rerr = err + } + err = es.condfn(err) + } + return +} + +func (es *bodyEOFSignal) Close() error { + es.mu.Lock() + defer es.mu.Unlock() + if es.closed { + return nil + } + es.closed = true + if es.earlyCloseFn != nil && es.rerr != io.EOF { + return es.earlyCloseFn() + } + err := es.body.Close() + return es.condfn(err) +} + +// caller must hold es.mu. +func (es *bodyEOFSignal) condfn(err error) error { + if es.fn == nil { + return err + } + err = es.fn(err) + es.fn = nil + return err +} + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + _ incomparable + body *bodyEOFSignal // underlying HTTP/1 response body framing + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // any error from gzip.NewReader; sticky +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zr == nil { + if gz.zerr == nil { + gz.zr, gz.zerr = gzip.NewReader(gz.body) + } + if gz.zerr != nil { + return 0, gz.zerr + } + } + + gz.body.mu.Lock() + if gz.body.closed { + err = errReadOnClosedResBody + } + gz.body.mu.Unlock() + + if err != nil { + return 0, err + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type tlsHandshakeTimeoutError struct{} + +func (tlsHandshakeTimeoutError) Timeout() bool { return true } +func (tlsHandshakeTimeoutError) Temporary() bool { return true } +func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } + +// fakeLocker is a sync.Locker which does nothing. 
+// It's used to guard
+// test-only fields when not under test, to avoid runtime atomic
+// overhead.
+type fakeLocker struct{}
+
+func (fakeLocker) Lock() {}
+func (fakeLocker) Unlock() {}
+
+// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
+// cfg is nil. This is safe to call even if cfg is in active use by a TLS
+// client or server.
+//
+// cloneTLSConfig should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/searKing/golang
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname cloneTLSConfig
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
+
+type connLRU struct {
+ ll *list.List // list.Element.Value type is of *persistConn
+ m map[*persistConn]*list.Element
+}
+
+// add adds pc to the head of the linked list.
+func (cl *connLRU) add(pc *persistConn) {
+ if cl.ll == nil {
+ cl.ll = list.New()
+ cl.m = make(map[*persistConn]*list.Element)
+ }
+ ele := cl.ll.PushFront(pc)
+ if _, ok := cl.m[pc]; ok {
+ panic("persistConn was already in LRU")
+ }
+ cl.m[pc] = ele
+}
+
+func (cl *connLRU) removeOldest() *persistConn {
+ ele := cl.ll.Back()
+ pc := ele.Value.(*persistConn)
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ return pc
+}
+
+// remove removes pc from cl.
+func (cl *connLRU) remove(pc *persistConn) {
+ if ele, ok := cl.m[pc]; ok {
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ }
+}
+
+// len returns the number of items in the cache.
+func (cl *connLRU) len() int {
+ return len(cl.m)
+}
diff --git a/transport_default_other.go b/transport_default_other.go
new file mode 100644
index 00000000..7d4e9103
--- /dev/null
+++ b/transport_default_other.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !((js && wasm) || wasip1)
+
+package req
+
+import (
+ "context"
+ "net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return dialer.DialContext
+}
diff --git a/transport_default_wasm.go b/transport_default_wasm.go
new file mode 100644
index 00000000..731acdf2
--- /dev/null
+++ b/transport_default_wasm.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (js && wasm) || wasip1
+
+package req
+
+import (
+ "context"
+ "net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return nil
+}
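
The two build-tagged files above exist so that a concrete TCP dialer is only wired up on platforms that actually have one: defaultTransportDialContext hands back dialer.DialContext normally, and returns nil on js/wasm and wasip1 builds, where no socket dialer is available. Below is a minimal, self-contained sketch (not part of the patch) of what the non-wasm variant amounts to; the helper is re-declared here only because the real one is unexported, and the target address is purely illustrative.

package main

import (
    "context"
    "fmt"
    "net"
    "time"
)

// Re-declared copy of the unexported helper from transport_default_other.go,
// shown only for illustration.
func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
    return dialer.DialContext
}

func main() {
    // Wrap a plain net.Dialer; the returned function has the DialContext shape
    // the transport expects.
    dial := defaultTransportDialContext(&net.Dialer{Timeout: 5 * time.Second})

    // "example.com:80" is an arbitrary, illustrative target.
    conn, err := dial(context.Background(), "tcp", "example.com:80")
    if err != nil {
        fmt.Println("dial failed:", err)
        return
    }
    defer conn.Close()
    fmt.Println("connected to", conn.RemoteAddr())
}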